diff --git a/benchmarks/profiler_benchmark/profiler_bench.py b/benchmarks/profiler_benchmark/profiler_bench.py index 5c1f2597415cb..435b8724635da 100644 --- a/benchmarks/profiler_benchmark/profiler_bench.py +++ b/benchmarks/profiler_benchmark/profiler_bench.py @@ -46,8 +46,7 @@ def parallel_task(x): print("No CUDA available") sys.exit() - print("Payload: {}, {} iterations; timer min. runtime = {}\n".format( - args.workload, args.internal_iter, args.timer_min_run_time)) + print(f"Payload: {args.workload}, {args.internal_iter} iterations; timer min. runtime = {args.timer_min_run_time}\n") INTERNAL_ITER = args.internal_iter for profiling_enabled in [False, True]: diff --git a/functorch/examples/dp_cifar10/cifar10_transforms.py b/functorch/examples/dp_cifar10/cifar10_transforms.py index 600931d50ec9d..863896983a08a 100644 --- a/functorch/examples/dp_cifar10/cifar10_transforms.py +++ b/functorch/examples/dp_cifar10/cifar10_transforms.py @@ -115,7 +115,7 @@ def compute_loss_and_output(weights, image, target): # is not to be differentiated. `f'` returns the gradient w.r.t. the loss, # the loss, and the auxiliary value. grads_loss_output = grad_and_value(compute_loss_and_output, has_aux=True) - weights = {k: v for k, v in model.named_parameters()} + weights = dict(model.named_parameters()) # detaching weights since we don't need to track gradients outside of transforms # and this is more performant diff --git a/scripts/get_python_cmake_flags.py b/scripts/get_python_cmake_flags.py index 131f5b7c19e5b..05562510d050d 100644 --- a/scripts/get_python_cmake_flags.py +++ b/scripts/get_python_cmake_flags.py @@ -19,8 +19,8 @@ import sys flags = [ - '-DPYTHON_EXECUTABLE:FILEPATH={}'.format(sys.executable), - '-DPYTHON_INCLUDE_DIR={}'.format(sysconfig.get_path('include')), + f'-DPYTHON_EXECUTABLE:FILEPATH={sys.executable}', + f"-DPYTHON_INCLUDE_DIR={sysconfig.get_path('include')}", ] print(' '.join(flags), end='') diff --git a/scripts/model_zoo/update-caffe2-models.py b/scripts/model_zoo/update-caffe2-models.py index 7f9c8e9815dba..9f48920e6a8f3 100755 --- a/scripts/model_zoo/update-caffe2-models.py +++ b/scripts/model_zoo/update-caffe2-models.py @@ -30,7 +30,7 @@ def _download(self, model): # (Sep 17, 2017) downloadFromURLToFile(url, dest) except Exception as e: - print("Abort: {reason}".format(reason=e)) + print(f"Abort: {e}") print("Cleaning up...") deleteDirectory(model_dir) exit(1) @@ -53,20 +53,20 @@ def _prepare_model_data(self, model): if os.path.exists(model_dir): return os.makedirs(model_dir) - url = 'https://s3.amazonaws.com/download.onnx/models/{}.tar.gz'.format(model) + url = f'https://s3.amazonaws.com/download.onnx/models/{model}.tar.gz' # On Windows, NamedTemporaryFile cannot be opened for a # second time download_file = tempfile.NamedTemporaryFile(delete=False) try: download_file.close() - print('Start downloading model {} from {}'.format(model, url)) + print(f'Start downloading model {model} from {url}') urlretrieve(url, download_file.name) print('Done') with tarfile.open(download_file.name) as t: t.extractall(models_dir) except Exception as e: - print('Failed to prepare data for model {}: {}'.format(model, e)) + print(f'Failed to prepare data for model {model}: {e}') raise finally: os.remove(download_file.name) @@ -133,7 +133,7 @@ def upload_models(): 's3', 'cp', model + '.tar.gz', - "s3://download.onnx/models/{}.tar.gz".format(model), + f"s3://download.onnx/models/{model}.tar.gz", '--acl', 'public-read' ], cwd=onnx_models_dir) diff --git a/scripts/model_zoo/update-models-from-caffe2.py 
b/scripts/model_zoo/update-models-from-caffe2.py index 9e408d6808f17..537fc03a60acb 100644 --- a/scripts/model_zoo/update-models-from-caffe2.py +++ b/scripts/model_zoo/update-models-from-caffe2.py @@ -51,20 +51,20 @@ def upload_onnx_model(model_name, zoo_dir, backup=False, only_local=False): model_dir = os.path.join(zoo_dir, model_name) suffix = '-backup' if backup else '' if backup: - print('Backing up the previous version of ONNX model {}...'.format(model_name)) - rel_file_name = '{}{}.tar.gz'.format(model_name, suffix) + print(f'Backing up the previous version of ONNX model {model_name}...') + rel_file_name = f'{model_name}{suffix}.tar.gz' abs_file_name = os.path.join(zoo_dir, rel_file_name) - print('Compressing {} model to {}'.format(model_name, abs_file_name)) + print(f'Compressing {model_name} model to {abs_file_name}') with tarfile.open(abs_file_name, 'w:gz') as f: f.add(model_dir, arcname=model_name) file_size = os.stat(abs_file_name).st_size - print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, float(file_size) / 1024 / 1024)) + print(f'Uploading {abs_file_name} ({float(file_size) / 1024 / 1024} MB) to s3 cloud...') client = boto3.client('s3', 'us-east-1') transfer = boto3.s3.transfer.S3Transfer(client) - transfer.upload_file(abs_file_name, 'download.onnx', 'models/latest/{}'.format(rel_file_name), + transfer.upload_file(abs_file_name, 'download.onnx', f'models/latest/{rel_file_name}', extra_args={'ACL': 'public-read'}) - print('Successfully uploaded {} to s3!'.format(rel_file_name)) + print(f'Successfully uploaded {rel_file_name} to s3!') def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False): @@ -75,7 +75,7 @@ def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False): return else: shutil.rmtree(model_dir) - url = 'https://s3.amazonaws.com/download.onnx/models/latest/{}.tar.gz'.format(model_name) + url = f'https://s3.amazonaws.com/download.onnx/models/latest/{model_name}.tar.gz' download_file = tempfile.NamedTemporaryFile(delete=False) try: @@ -84,10 +84,10 @@ def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False): model_name, url, download_file.name)) urlretrieve(url, download_file.name) with tarfile.open(download_file.name) as t: - print('Extracting ONNX model {} to {} ...\n'.format(model_name, zoo_dir)) + print(f'Extracting ONNX model {model_name} to {zoo_dir} ...\n') t.extractall(zoo_dir) except Exception as e: - print('Failed to download/backup data for ONNX model {}: {}'.format(model_name, e)) + print(f'Failed to download/backup data for ONNX model {model_name}: {e}') if not os.path.exists(model_dir): os.makedirs(model_dir) finally: @@ -119,7 +119,7 @@ def download_caffe2_model(model_name, zoo_dir, use_cache=True): # (Sep 17, 2017) downloadFromURLToFile(url, dest) except Exception as e: - print("Abort: {reason}".format(reason=e)) + print(f"Abort: {e}") print("Cleaning up...") deleteDirectory(model_dir) raise @@ -131,14 +131,14 @@ def caffe2_to_onnx(caffe2_model_name, caffe2_model_dir): with open(os.path.join(caffe2_model_dir, 'init_net.pb'), 'rb') as f: caffe2_init_proto.ParseFromString(f.read()) - caffe2_init_proto.name = '{}_init'.format(caffe2_model_name) + caffe2_init_proto.name = f'{caffe2_model_name}_init' with open(os.path.join(caffe2_model_dir, 'predict_net.pb'), 'rb') as f: caffe2_predict_proto.ParseFromString(f.read()) caffe2_predict_proto.name = caffe2_model_name with open(os.path.join(caffe2_model_dir, 'value_info.json'), 'rb') as f: value_info = json.loads(f.read()) - 
print('Converting Caffe2 model {} in {} to ONNX format'.format(caffe2_model_name, caffe2_model_dir)) + print(f'Converting Caffe2 model {caffe2_model_name} in {caffe2_model_dir} to ONNX format') onnx_model = caffe2.python.onnx.frontend.caffe2_net_to_onnx_model( init_net=caffe2_init_proto, predict_net=caffe2_predict_proto, @@ -245,7 +245,7 @@ def onnx_verify(onnx_model, inputs, ref_outputs): for onnx_model_name in model_mapping: c2_model_name = model_mapping[onnx_model_name] - print('####### Processing ONNX model {} ({} in Caffe2) #######'.format(onnx_model_name, c2_model_name)) + print(f'####### Processing ONNX model {onnx_model_name} ({c2_model_name} in Caffe2) #######') download_caffe2_model(c2_model_name, caffe2_zoo_dir, use_cache=use_cache) download_onnx_model(onnx_model_name, onnx_zoo_dir, use_cache=use_cache, only_local=only_local) @@ -261,19 +261,19 @@ def onnx_verify(onnx_model, inputs, ref_outputs): onnx_model, c2_init_net, c2_predict_net = caffe2_to_onnx(c2_model_name, os.path.join(caffe2_zoo_dir, c2_model_name)) - print('Deleteing old ONNX {} model...'.format(onnx_model_name)) + print(f'Deleting old ONNX {onnx_model_name} model...') for f in glob.glob(os.path.join(onnx_model_dir, 'model*'.format(onnx_model_name))): os.remove(f) - print('Serializing generated ONNX {} model ...'.format(onnx_model_name)) + print(f'Serializing generated ONNX {onnx_model_name} model ...') with open(os.path.join(onnx_model_dir, 'model.onnx'), 'wb') as file: file.write(onnx_model.SerializeToString()) - print('Verifying model {} with ONNX model checker...'.format(onnx_model_name)) + print(f'Verifying model {onnx_model_name} with ONNX model checker...') onnx.checker.check_model(onnx_model) total_existing_data_set = 0 - print('Verifying model {} with existing test data...'.format(onnx_model_name)) + print(f'Verifying model {onnx_model_name} with existing test data...') for f in glob.glob(os.path.join(onnx_model_dir, '*.npz')): test_data = np.load(f, encoding='bytes') inputs = list(test_data['inputs']) @@ -285,41 +285,41 @@ def onnx_verify(onnx_model, inputs, ref_outputs): inputs_num = len(glob.glob(os.path.join(f, 'input_*.pb'))) for i in range(inputs_num): tensor = onnx.TensorProto() - with open(os.path.join(f, 'input_{}.pb'.format(i)), 'rb') as pf: + with open(os.path.join(f, f'input_{i}.pb'), 'rb') as pf: tensor.ParseFromString(pf.read()) inputs.append(numpy_helper.to_array(tensor)) ref_outputs = [] ref_outputs_num = len(glob.glob(os.path.join(f, 'output_*.pb'))) for i in range(ref_outputs_num): tensor = onnx.TensorProto() - with open(os.path.join(f, 'output_{}.pb'.format(i)), 'rb') as pf: + with open(os.path.join(f, f'output_{i}.pb'), 'rb') as pf: tensor.ParseFromString(pf.read()) ref_outputs.append(numpy_helper.to_array(tensor)) onnx_verify(onnx_model, inputs, ref_outputs) total_existing_data_set += 1 starting_index = 0 - while os.path.exists(os.path.join(onnx_model_dir, 'test_data_set_{}'.format(starting_index))): + while os.path.exists(os.path.join(onnx_model_dir, f'test_data_set_{starting_index}')): starting_index += 1 if total_existing_data_set == 0 and add_test_data == 0: add_test_data = 3 total_existing_data_set = 3 - print('Generating {} sets of new test data...'.format(add_test_data)) + print(f'Generating {add_test_data} sets of new test data...') for i in range(starting_index, add_test_data + starting_index): - data_dir = os.path.join(onnx_model_dir, 'test_data_set_{}'.format(i)) + data_dir = os.path.join(onnx_model_dir, f'test_data_set_{i}') os.makedirs(data_dir) inputs = 
generate_test_input_data(onnx_model, 255) ref_outputs = generate_test_output_data(c2_init_net, c2_predict_net, inputs) onnx_verify(onnx_model, inputs, ref_outputs) for index, input in enumerate(inputs): tensor = numpy_helper.from_array(input[1]) - with open(os.path.join(data_dir, 'input_{}.pb'.format(index)), 'wb') as file: + with open(os.path.join(data_dir, f'input_{index}.pb'), 'wb') as file: file.write(tensor.SerializeToString()) for index, output in enumerate(ref_outputs): tensor = numpy_helper.from_array(output) - with open(os.path.join(data_dir, 'output_{}.pb'.format(index)), 'wb') as file: + with open(os.path.join(data_dir, f'output_{index}.pb'), 'wb') as file: file.write(tensor.SerializeToString()) del onnx_model diff --git a/scripts/release_notes/common.py b/scripts/release_notes/common.py index bddac91224af4..26317e8ddad72 100644 --- a/scripts/release_notes/common.py +++ b/scripts/release_notes/common.py @@ -205,7 +205,7 @@ def run_query(query): if request.status_code == 200: return request.json() else: - raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, request.json())) + raise Exception(f"Query failed to run by returning code of {request.status_code}. {request.json()}") def github_data(pr_number): diff --git a/setup.py b/setup.py index 32c77a5dda417..41e9ad6e1aca1 100644 --- a/setup.py +++ b/setup.py @@ -224,7 +224,7 @@ python_min_version = (3, 8, 0) python_min_version_str = '.'.join(map(str, python_min_version)) if sys.version_info < python_min_version: - print("You are using Python {}. Python >={} is required.".format(platform.python_version(), + print("You are using Python {}. Python >={} is required.".format(platform.python_version(), # noqa: UP032 python_min_version_str)) sys.exit(-1) diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py index 5791b99374653..ae97edee81ede 100644 --- a/test/distributed/test_c10d_nccl.py +++ b/test/distributed/test_c10d_nccl.py @@ -1906,9 +1906,7 @@ def first_bucket_size(ddp_bucket_mb): ): with first_bucket_size(bucketsize): model_msg = ( - "rank = {} formats = {} dtypes = {} bucketsize = {} ".format( - self.rank, formats, dtypes, bucketsize - ) + f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} " ) try: m = ConvNet(layer_devs, formats, dtypes) @@ -2387,9 +2385,7 @@ def test_ddp_weight_sharing(self): "mismatch at " + name + ".grad for " - + "set_to_none = {}, use_bucket_view = {}".format( - try_set_to_none, use_bucket_view - ), + + f"set_to_none = {try_set_to_none}, use_bucket_view = {use_bucket_view}", ) @requires_nccl() diff --git a/test/distributed/test_c10d_spawn.py b/test/distributed/test_c10d_spawn.py index 8ac496ea6c063..7c09a8af7abc7 100644 --- a/test/distributed/test_c10d_spawn.py +++ b/test/distributed/test_c10d_spawn.py @@ -57,9 +57,7 @@ def _test_multiprocess(self, f, shared_tensors, init_pg, n_output): self.assertEqual( expected, result, - msg=( - "Expect rank {} to receive tensor {} but got {}." - ).format(pid, expected, result) + msg=f"Expect rank {pid} to receive tensor {expected} but got {result}." 
) for _ in range(ws): diff --git a/test/distributions/test_distributions.py b/test/distributions/test_distributions.py index 2f4d256516c84..2e5197f3b5bb4 100644 --- a/test/distributions/test_distributions.py +++ b/test/distributions/test_distributions.py @@ -912,8 +912,7 @@ def test_sample_detached(self): dist = Dist(**param) sample = dist.sample() self.assertFalse(sample.requires_grad, - msg='{} example {}/{}, .sample() is not detached'.format( - Dist.__name__, i + 1, len(params))) + msg=f'{Dist.__name__} example {i + 1}/{len(params)}, .sample() is not detached') @skipIfTorchDynamo("Not a TorchDynamo suitable test") def test_rsample_requires_grad(self): @@ -926,8 +925,7 @@ def test_rsample_requires_grad(self): continue sample = dist.rsample() self.assertTrue(sample.requires_grad, - msg='{} example {}/{}, .rsample() does not require grad'.format( - Dist.__name__, i + 1, len(params))) + msg=f'{Dist.__name__} example {i + 1}/{len(params)}, .rsample() does not require grad') def test_enumerate_support_type(self): for Dist, params in EXAMPLES: @@ -4377,8 +4375,7 @@ def test_params_constraints(self): if is_dependent(constraint): continue - message = '{} example {}/{} parameter {} = {}'.format( - Dist.__name__, i + 1, len(params), name, value) + message = f'{Dist.__name__} example {i + 1}/{len(params)} parameter {name} = {value}' self.assertTrue(constraint.check(value).all(), msg=message) def test_support_constraints(self): @@ -4388,8 +4385,7 @@ def test_support_constraints(self): dist = Dist(**param) value = dist.sample() constraint = dist.support - message = '{} example {}/{} sample = {}'.format( - Dist.__name__, i + 1, len(params), value) + message = f'{Dist.__name__} example {i + 1}/{len(params)} sample = {value}' self.assertEqual(constraint.event_dim, len(dist.event_shape), msg=message) ok = constraint.check(value) self.assertEqual(ok.shape, dist.batch_shape, msg=message) diff --git a/test/onnx/pytorch_helper.py b/test/onnx/pytorch_helper.py index aca0e2c9a2b79..2191a3bb13c58 100644 --- a/test/onnx/pytorch_helper.py +++ b/test/onnx/pytorch_helper.py @@ -68,9 +68,7 @@ def PyTorchModule(helper, model, sample_arguments, caffe2_inputs, prefix_name=No if len(uninitialized_inputs) != len(caffe2_inputs): raise ValueError( - "Expected {} inputs but found {}".format( - len(uninitialized_inputs), len(caffe2_inputs) - ) + f"Expected {len(uninitialized_inputs)} inputs but found {len(caffe2_inputs)}" ) def remap_blob_name(name): diff --git a/test/onnx/test_operators.py b/test/onnx/test_operators.py index 7bc47e8cefc44..dc9a53a2c91f2 100644 --- a/test/onnx/test_operators.py +++ b/test/onnx/test_operators.py @@ -105,9 +105,7 @@ def assertONNX(self, f, args, params=None, **kwargs): # Assume: # 1) the old test should be delete before the test. # 2) only one assertONNX in each test, otherwise will override the data. - assert not os.path.exists(output_dir), "{} should not exist!".format( - output_dir - ) + assert not os.path.exists(output_dir), f"{output_dir} should not exist!" 
os.makedirs(output_dir) with open(os.path.join(output_dir, "model.onnx"), "wb") as file: file.write(model_def.SerializeToString()) diff --git a/test/onnx_caffe2/export_onnx_tests_generator.py b/test/onnx_caffe2/export_onnx_tests_generator.py index 39b18d1e9fa6c..e81f1277d8ee9 100644 --- a/test/onnx_caffe2/export_onnx_tests_generator.py +++ b/test/onnx_caffe2/export_onnx_tests_generator.py @@ -145,14 +145,10 @@ def convert_tests(testcases, sets=1): failed += 1 print( - "Collect {} test cases from PyTorch repo, failed to export {} cases.".format( - len(testcases), failed - ) + f"Collect {len(testcases)} test cases from PyTorch repo, failed to export {failed} cases." ) print( - "PyTorch converted cases are stored in {}.".format( - onnx_test_common.pytorch_converted_dir - ) + f"PyTorch converted cases are stored in {onnx_test_common.pytorch_converted_dir}." ) print_stats(FunctionalModule_nums, nn_module) diff --git a/test/optim/test_lrscheduler.py b/test/optim/test_lrscheduler.py index 7162ed74fb11b..fff4b60e4fa7a 100644 --- a/test/optim/test_lrscheduler.py +++ b/test/optim/test_lrscheduler.py @@ -1989,9 +1989,7 @@ def _test_get_last_lr(self, schedulers, targets, epochs=10): self.assertEqual( target, result, - msg="LR is wrong in epoch {}: expected {}, got {}".format( - epoch, t, r - ), + msg=f"LR is wrong in epoch {epoch}: expected {t}, got {r}", atol=1e-5, rtol=0, ) diff --git a/test/quantization/core/test_docs.py b/test/quantization/core/test_docs.py index ab41c51388bae..82ef4d277805b 100644 --- a/test/quantization/core/test_docs.py +++ b/test/quantization/core/test_docs.py @@ -62,9 +62,7 @@ def get_correct_path(path_from_pytorch): if "\n" not in unique_identifier: unique_identifier += "\n" - assert unique_identifier in content, "could not find {} in {}".format( - unique_identifier, path_to_file - ) + assert unique_identifier in content, f"could not find {unique_identifier} in {path_to_file}" # get index of first line of code line_num_start = content.index(unique_identifier) + 1 diff --git a/test/quantization/core/test_quantized_module.py b/test/quantization/core/test_quantized_module.py index ad1e391b1b6ad..3fb05db64b9e5 100644 --- a/test/quantization/core/test_quantized_module.py +++ b/test/quantization/core/test_quantized_module.py @@ -1073,8 +1073,7 @@ def test_layer_norm(self): qY = quant_mod(qX) self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(), - msg="LayerNorm module API failed, qY_ref\n{} vs qY\n{}" - .format(qY_ref, qY)) + msg=f"LayerNorm module API failed, qY_ref\n{qY_ref} vs qY\n{qY}") def test_group_norm(self): """Tests the correctness of the groupnorm module. @@ -1104,8 +1103,7 @@ def test_group_norm(self): qY = quant_mod(qX) self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(), - msg="GroupNorm module API failed, qY_ref\n{} vs qY\n{}" - .format(qY_ref, qY)) + msg=f"GroupNorm module API failed, qY_ref\n{qY_ref} vs qY\n{qY}") def test_instance_norm(self): """Tests the correctness of the instancenorm{n}d modules. @@ -1145,8 +1143,7 @@ def test_instance_norm(self): self.assertEqual( qY_ref.int_repr().numpy(), qY.int_repr().numpy(), - msg="InstanceNorm module API failed, qY_ref\n{} vs qY\n{}" - .format(qY_ref, qY)) + msg=f"InstanceNorm module API failed, qY_ref\n{qY_ref} vs qY\n{qY}") def _test_activation_module_impl(self, name, float_module_class, quantized_module_class, extra_kwargs): """Tests the correctness of the ELU module. 
@@ -1173,8 +1170,7 @@ def _test_activation_module_impl(self, name, float_module_class, quantized_modul quant_mod = quantized_module_class(y_scale, y_zero_point, **extra_kwargs) qY = quant_mod(qX) self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(), - msg="{} module API failed, qY_ref\n{} vs qY\n{}" - .format(name, qY_ref, qY)) + msg=f"{name} module API failed, qY_ref\n{qY_ref} vs qY\n{qY}") def _test_leaky_relu_serialization(self): scale_original = 10.0 / 256 @@ -1371,8 +1367,7 @@ def test_channel_shuffle(self): qY = quant_mod(qX) self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(), - msg="ChannelShuffle module API failed, qY_ref\n{} vs qY\n{}" - .format(qY_ref, qY)) + msg=f"ChannelShuffle module API failed, qY_ref\n{qY_ref} vs qY\n{qY}") @skipIfNoONEDNN def test_linear_leaky_relu(self): diff --git a/test/quantization/core/test_quantized_op.py b/test/quantization/core/test_quantized_op.py index 1e99934976148..b12b9baf76c41 100644 --- a/test/quantization/core/test_quantized_op.py +++ b/test/quantization/core/test_quantized_op.py @@ -223,9 +223,7 @@ def _test_activation_function(self, X, fn_name, test_configs): # Finds qY using in-place or non-in-place quantized operators. qY = q_op(qX, **extra_kwargs) - self.assertEqual(qY, qY_hat, msg='{} - {} failed: ({} vs. {})'.format( - fn_name, q_op, qY, qY_hat - )) + self.assertEqual(qY, qY_hat, msg=f'{fn_name} - {q_op} failed: ({qY} vs. {qY_hat})') """Tests the correctness of the quantized::relu op.""" @override_qengines @@ -2202,8 +2200,7 @@ def test_interpolate(self, X, size, mode, scale_factor, align_corners, nhwc_layo qX_hat = op(qX, size=size, scale_factor=scale_factor, mode=mode, align_corners=align_corners) self.assertEqual(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0, - msg="{} results are off: qX_hat={} X_ref={}" - .format(name, qX_hat.int_repr(), X_ref), + msg=f"{name} results are off: qX_hat={qX_hat.int_repr()} X_ref={X_ref}", exact_dtype=False) self.assertEqual(scale, qX_hat.q_scale(), msg=error_message.format(name + '.scale', scale, qX_hat.q_scale())) @@ -2257,8 +2254,7 @@ def test_interpolate3d(self, X, size, mode, scale_factor, align_corners, nhwc_la qX_hat = op(qX, size=size, scale_factor=scale_factor, mode=mode, align_corners=align_corners) self.assertEqual(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0, - msg="{} results are off: qX_hat={}, X_ref={}" - .format(name, qX_hat.int_repr(), X_ref), exact_dtype=False) + msg=f"{name} results are off: qX_hat={qX_hat.int_repr()}, X_ref={X_ref}", exact_dtype=False) self.assertEqual(scale, qX_hat.q_scale(), msg=error_message.format(name + '.scale', scale, qX_hat.q_scale())) self.assertEqual(zero_point, qX_hat.q_zero_point(), diff --git a/test/quantization/fx/test_quantize_fx.py b/test/quantization/fx/test_quantize_fx.py index a4e9577b8f517..49c79cd9663e0 100644 --- a/test/quantization/fx/test_quantize_fx.py +++ b/test/quantization/fx/test_quantize_fx.py @@ -3916,9 +3916,7 @@ def _check_node_not_observed(model, arg_node, node): elif arg_node.op == "call_module": self.assertTrue( not _is_activation_post_process(getattr(model, arg_node.target)), - "Arg: {} of node: {} is observed but is not a float tensor".format( - arg_node, node - ), + f"Arg: {arg_node} of node: {node} is observed but is not a float tensor", ) for node in model.graph.nodes: diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py index 06dc70addcb55..ab4afdd0e38a1 100644 --- a/test/quantization/pt2e/test_quantize_pt2e.py +++ 
b/test/quantization/pt2e/test_quantize_pt2e.py @@ -821,9 +821,7 @@ def derive_qparams_fn( ) -> Tuple[Tensor, Tensor]: assert ( len(obs_or_fqs) == 2 - ), "Expecting two obs/fqs, one for activation and one for weight, got: {}".format( - len(obs_or_fq) - ) + ), f"Expecting two obs/fqs, one for activation and one for weight, got: {len(obs_or_fqs)}" act_obs_or_fq = obs_or_fqs[0] weight_obs_or_fq = obs_or_fqs[1] act_scale, act_zp = act_obs_or_fq.calculate_qparams() diff --git a/test/run_test.py b/test/run_test.py index fdcfc4a7033e7..b81544d8b4d63 100755 --- a/test/run_test.py +++ b/test/run_test.py @@ -737,9 +737,7 @@ def test_distributed(test_module, test_directory, options): init_str = "with {} init_method" with_init = init_str.format("file" if with_init_file else "env") print_to_stderr( - "Running distributed tests for the {} backend {}".format( - backend, with_init - ) + f"Running distributed tests for the {backend} backend {with_init}" ) old_environ = dict(os.environ) os.environ["TEMP_DIR"] = tmp_dir diff --git a/test/test_autocast.py b/test/test_autocast.py index c64b3829a105c..7e449df11fefc 100644 --- a/test/test_autocast.py +++ b/test/test_autocast.py @@ -41,8 +41,7 @@ def cast(val, to_type): output = getattr(module, op)(*args, **add_kwargs) if isinstance(output, torch.Tensor): self.assertTrue(out_type == output.dtype, - "autocast for torch.{} produced {}, should produce {}" - .format(op, output.dtype, out_type)) + f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}") # Try Tensor.* variant: if hasattr(torch.Tensor, op): output_method = getattr(args[0], op)(*args[1:], **add_kwargs) @@ -52,8 +51,7 @@ def cast(val, to_type): .format(op, output_method.dtype, out_type)) self.assertTrue((output is not None) or (output_method is not None), - "{} not found as an attribute on either Tensor or the requested module {}".format( - op, module)) + f"{op} not found as an attribute on either Tensor or the requested module {module}") # Accounts for ops that return Tensors, iterables, and other non-Tensors. # For example, lstm_cell returns a tuple and equal returns bool. 
diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py index 054723b84551c..0785666556364 100644 --- a/test/test_binary_ufuncs.py +++ b/test/test_binary_ufuncs.py @@ -4449,9 +4449,7 @@ def test(self, device, dtype): for op in tensor_binary_ops: test_name = f"test_{op}_not_implemented" - assert not hasattr(cls, test_name), "{} already in {}".format( - test_name, cls.__name__ - ) + assert not hasattr(cls, test_name), f"{test_name} already in {cls.__name__}" setattr(cls, test_name, create_test_func(op)) diff --git a/test/test_cuda.py b/test/test_cuda.py index 415e31a3cd000..2a2db1e59c78c 100644 --- a/test/test_cuda.py +++ b/test/test_cuda.py @@ -1738,8 +1738,7 @@ def cast(val, to_type): output = getattr(module, op)(*args, **add_kwargs) if isinstance(output, torch.Tensor): self.assertTrue(out_type == output.dtype, - "autocast for torch.{} produced {}, should produce {}" - .format(op, output.dtype, out_type)) + f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}") # Try Tensor.* variant: if hasattr(torch.Tensor, op): @@ -1750,8 +1749,7 @@ def cast(val, to_type): .format(op, output_method.dtype, out_type)) self.assertTrue((output is not None) or (output_method is not None), - "{} not found as an attribute on either Tensor or the requested module {}".format( - op, module)) + f"{op} not found as an attribute on either Tensor or the requested module {module}") # Accounts for ops that return Tensors, iterables, and other non-Tensors. # For example, lstm_cell returns a tuple and equal returns bool. diff --git a/test/test_dataloader.py b/test/test_dataloader.py index f4a5215094829..4747bb3cfe279 100644 --- a/test/test_dataloader.py +++ b/test/test_dataloader.py @@ -2056,8 +2056,7 @@ def fail(reason): if exit_method == 'loader_error': if not isinstance(loader_p.exception, RuntimeError) or \ 'Loader error' not in str(loader_p.exception): - fail('loader process did not raise expected exception, but had {}'.format( - loader_p.exception)) + fail(f'loader process did not raise expected exception, but had {loader_p.exception}') elif exit_method == 'worker_kill': if isinstance(loader_p.exception, RuntimeError): if 'DataLoader worker (pid' not in str(loader_p.exception): @@ -2073,13 +2072,11 @@ def fail(reason): # After all, we are happy as long as it terminates. 
pass else: - fail('loader process did not raise expected exception, but had {}'.format( - loader_p.exception)) + fail(f'loader process did not raise expected exception, but had {loader_p.exception}') elif exit_method == 'worker_error': if not isinstance(loader_p.exception, RuntimeError) or \ 'Worker error' not in str(loader_p.exception): - fail('loader process did not raise expected exception, but had {}'.format( - loader_p.exception)) + fail(f'loader process did not raise expected exception, but had {loader_p.exception}') finally: loader_p.terminate() diff --git a/test/test_dispatch.py b/test/test_dispatch.py index 7a0da7e3b553d..e98385a8ce3a2 100644 --- a/test/test_dispatch.py +++ b/test/test_dispatch.py @@ -116,13 +116,11 @@ def check_invariants(actual_provenance): ) self.assertMultiLineEqual( expected_state, actual_state, - "expected from {}; actual from {}" - .format(expected_provenance, actual_provenance) + f"expected from {expected_provenance}; actual from {actual_provenance}" ) self.assertMultiLineEqual( expected_table, actual_table, - "expected from {}; actual from {}" - .format(expected_provenance, actual_provenance) + f"expected from {expected_provenance}; actual from {actual_provenance}" ) results.setdefault(frozenset(), Result("", "", "hardcoded initial state")) @@ -179,8 +177,7 @@ def check_invariants(actual_provenance): else: active_ops.remove(op_ix) check_invariants( - "running ctors {}, then running dtors {}" - .format(ctor_order[:last_ctor + 1], dtor_order[:i + 1]) + f"running ctors {ctor_order[:last_ctor + 1]}, then running dtors {dtor_order[:i + 1]}" ) return results[set_to_report][0] diff --git a/test/test_jit.py b/test/test_jit.py index f5eeebd05c229..cd7312ad92ec0 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -6561,8 +6561,7 @@ def func(a, b): continue if isinstance(res_python, float) and math.isnan(res_python) and math.isnan(res_script): continue - msg = ("Failed on {func_name} with inputs {a} {b}. Python: {res_python}, Script: {res_script}" - .format(func_name=func_name, a=a, b=b, res_python=res_python, res_script=res_script)) + msg = (f"Failed on {func_name} with inputs {a} {b}. 
Python: {res_python}, Script: {res_script}") # math.pow() behavior has changed in 3.11, see https://docs.python.org/3/library/math.html#math.pow if sys.version_info >= (3, 11) and func_name == "pow" and a == 0.0 and b == -math.inf: self.assertTrue(res_python == math.inf and type(res_script) is RuntimeError) @@ -7641,8 +7640,7 @@ def test(op, args): self.assertEqual( cu.func(), scope['func'](), - msg="Failed with op: {}, lhs: {}, rhs: {}" - .format(op, args[0], args[1]) + msg=f"Failed with op: {op}, lhs: {args[0]}, rhs: {args[1]}" ) ops = ['is', 'is not'] @@ -7723,8 +7721,7 @@ def test(inp, typ, type_hint): self.assertEqual( cu.func(inp), scope['func'](inp), - msg="Failed with typ: {}" - .format(typ) + msg=f"Failed with typ: {typ}" ) inputs = [True, 1, 1.0, torch.tensor(1), [1, 2], (1.0,), [1, 2], 1] diff --git a/test/test_linalg.py b/test/test_linalg.py index dfefee9c00f8d..9c85af4fbd6ea 100644 --- a/test/test_linalg.py +++ b/test/test_linalg.py @@ -1553,8 +1553,7 @@ def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex): @precisionOverride({torch.cfloat: 5e-4}) def test_norm_complex(self, device, dtype): def gen_error_message(input_size, ord, keepdim, dim=None): - return "complex norm failed for input size {}, ord={}, keepdim={}, dim={}".format( - input_size, ord, keepdim, dim) + return f"complex norm failed for input size {input_size}, ord={ord}, keepdim={keepdim}, dim={dim}" vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf] matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf] @@ -2094,8 +2093,7 @@ def test_eigvals_errors_and_warnings(self, device, dtype): @skipCPUIfNoLapack def test_norm_old(self, device): def gen_error_message(input_size, p, keepdim, dim=None): - return "norm failed for input size {}, p={}, keepdim={}, dim={}".format( - input_size, p, keepdim, dim) + return f"norm failed for input size {input_size}, p={p}, keepdim={keepdim}, dim={dim}" # 'nuc' norm uses SVD, and thus its precsion is much lower than other norms. # test_svd takes @precisionOverride({torch.float: 1e-4, torch.cfloat: 2e-4}), @@ -2192,8 +2190,7 @@ def test_norm_old_nan_propagation(self, device): @skipCPUIfNoLapack def test_norm_complex_old(self, device): def gen_error_message(input_size, p, keepdim, dim=None): - return "complex norm failed for input size {}, p={}, keepdim={}, dim={}".format( - input_size, p, keepdim, dim) + return f"complex norm failed for input size {input_size}, p={p}, keepdim={keepdim}, dim={dim}" for keepdim in [False, True]: # vector norm diff --git a/test/test_namedtensor.py b/test/test_namedtensor.py index 572a2c290c3a4..ea45bba79beca 100644 --- a/test/test_namedtensor.py +++ b/test/test_namedtensor.py @@ -92,8 +92,7 @@ def _test_name_inference(self, op, args=(), expected_names=(), device='cpu', return result = op(*args) self.assertEqual(result.names, expected_names, - msg='Name inference for {} on device {} failed'.format( - op.__name__, device)) + msg=f'Name inference for {op.__name__} on device {device} failed') # TODO(rzou): Some form of this check should be added to self.assertEqual. # Right now I don't know what it should look like. 
diff --git a/test/test_ops.py b/test/test_ops.py index 440cb8f942cd0..3b43a56bc4c36 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1390,9 +1390,7 @@ def _tensor_requires_grad(x): # Partially supporting a dtype is not an error, but we print a warning if (len(partially_supported_forward) + len(partially_supported_backward)) > 0: - msg = "Some dtypes for {} on device type {} are only partially supported!\n".format( - op.name, device_type - ) + msg = f"Some dtypes for {op.name} on device type {device_type} are only partially supported!\n" if len(partially_supported_forward) > 0: msg = ( msg @@ -1426,9 +1424,7 @@ def _tensor_requires_grad(x): return # Generates error msg - msg = "The supported dtypes for {} on device type {} are incorrect!\n".format( - op.name, device_type - ) + msg = f"The supported dtypes for {op.name} on device type {device_type} are incorrect!\n" if len(supported_but_unclaimed_forward) > 0: msg = ( msg diff --git a/test/test_torch.py b/test/test_torch.py index 6867b68fec5c8..83b82c2174a77 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -8589,8 +8589,7 @@ def test_device(self): def get_expected_device_repr(device): if device.index is not None: - return "device(type='{type}', index={index})".format( - type=device.type, index=device.index) + return f"device(type='{device.type}', index={device.index})" return f"device(type='{device.type}')" diff --git a/test/test_unary_ufuncs.py b/test/test_unary_ufuncs.py index 2fe550a791728..93f75cc4d291e 100644 --- a/test/test_unary_ufuncs.py +++ b/test/test_unary_ufuncs.py @@ -162,9 +162,7 @@ def assertEqualHelper( ) else: self.fail( - "Expected dtype {} but got {}!".format( - expected.dtype, actual.dtype - ) + f"Expected dtype {expected.dtype} but got {actual.dtype}!" ) self.assertEqual( diff --git a/tools/amd_build/build_amd.py b/tools/amd_build/build_amd.py index 5d14e9266f3b4..8499fa733156b 100755 --- a/tools/amd_build/build_amd.py +++ b/tools/amd_build/build_amd.py @@ -153,12 +153,12 @@ def is_hip_clang() -> bool: lines = sources.readlines() newlines = [line.replace(" hip_hcc ", " amdhip64 ") for line in lines] if lines == newlines: - print("%s skipped" % gloo_cmake_file) + print(f"{gloo_cmake_file} skipped") else: with open(gloo_cmake_file, "w") as sources: for line in newlines: sources.write(line) - print("%s updated" % gloo_cmake_file) + print(f"{gloo_cmake_file} updated") gloo_cmake_file = "third_party/gloo/cmake/Modules/Findrccl.cmake" if os.path.exists(gloo_cmake_file): @@ -167,12 +167,12 @@ def is_hip_clang() -> bool: lines = sources.readlines() newlines = [line.replace("RCCL_LIBRARY", "RCCL_LIB_PATH") for line in lines] if lines == newlines: - print("%s skipped" % gloo_cmake_file) + print(f"{gloo_cmake_file} skipped") else: with open(gloo_cmake_file, "w") as sources: for line in newlines: sources.write(line) - print("%s updated" % gloo_cmake_file) + print(f"{gloo_cmake_file} updated") # TODO Remove once gloo submodule is recent enough to contain upstream fix. 
if is_hip_clang(): @@ -183,12 +183,12 @@ def is_hip_clang() -> bool: lines = sources.readlines() newlines = [line.replace("HIP_HCC_FLAGS", "HIP_CLANG_FLAGS") for line in lines] if lines == newlines: - print("%s skipped" % gloo_cmake_file) + print(f"{gloo_cmake_file} skipped") else: with open(gloo_cmake_file, "w") as sources: for line in newlines: sources.write(line) - print("%s updated" % gloo_cmake_file) + print(f"{gloo_cmake_file} updated") hipify_python.hipify( project_directory=proj_dir, diff --git a/tools/autograd/load_derivatives.py b/tools/autograd/load_derivatives.py index b846892b0e3ed..5bba7a3baf763 100644 --- a/tools/autograd/load_derivatives.py +++ b/tools/autograd/load_derivatives.py @@ -392,12 +392,10 @@ def repl(m: Any) -> str: # Call into the forward again. We need two cases here to handle both Tensor methods and at:: functions. if Variant.function in f.variants: - fw_formula = "at::{}({})".format(defn_name, ", ".join(new_args)) + fw_formula = f"at::{defn_name}({', '.join(new_args)})" else: assert Variant.method in f.variants - fw_formula = "{}.{}({})".format( - new_args[0], defn_name, ", ".join(new_args[1:]) - ) + fw_formula = f"{new_args[0]}.{defn_name}({', '.join(new_args[1:])})" # All of the input tangents are always used so all of them are required here. required_inputs_tangent = tuple(diff_arg_names) @@ -808,9 +806,7 @@ def stride_expr(name: str) -> str: ( r"{}.sym_size\((-?\w+)\)", { - "suffix": lambda m: "_sym_argsize_{}".format( - m.groups()[0].replace("-", "minus_") - ), + "suffix": lambda m: f"_sym_argsize_{m.groups()[0].replace('-', 'minus_')}", "nctype": lambda name: NamedCType(name, BaseCType(SymIntT)), }, ), diff --git a/tools/download_mnist.py b/tools/download_mnist.py index ac9c049bdeedb..021904959a9e0 100644 --- a/tools/download_mnist.py +++ b/tools/download_mnist.py @@ -26,7 +26,7 @@ def report_download_progress( if file_size != -1: percent = min(1, (chunk_number * chunk_size) / file_size) bar = "#" * int(64 * percent) - sys.stdout.write("\r0% |{:<64}| {}%".format(bar, int(percent * 100))) + sys.stdout.write(f"\r0% |{bar:<64}| {int(percent * 100)}%") def download(destination_path: str, resource: str, quiet: bool) -> None: diff --git a/tools/gdb/pytorch-gdb.py b/tools/gdb/pytorch-gdb.py index dec48f16d4288..3ec9aeff384ab 100644 --- a/tools/gdb/pytorch-gdb.py +++ b/tools/gdb/pytorch-gdb.py @@ -48,11 +48,11 @@ def invoke(self, args: str, from_tty: bool) -> None: return name = args[0] with DisableBreakpoints(): - res = gdb.parse_and_eval("torch::gdb::tensor_repr(%s)" % name) - print("Python-level repr of %s:" % name) + res = gdb.parse_and_eval(f"torch::gdb::tensor_repr({name})") + print(f"Python-level repr of {name}:") print(res.string()) # torch::gdb::tensor_repr returns a malloc()ed buffer, let's free it - gdb.parse_and_eval("(void)free(%s)" % int(res)) + gdb.parse_and_eval(f"(void)free({int(res)})") TensorRepr() diff --git a/tools/gen_vulkan_spv.py b/tools/gen_vulkan_spv.py index 3fb0ea1d75d7a..fc485eb0cbf47 100644 --- a/tools/gen_vulkan_spv.py +++ b/tools/gen_vulkan_spv.py @@ -33,7 +33,7 @@ def construct_mapping(self, node, deep=False): # type: ignore[no-untyped-def] raise ConstructorError( None, None, - "expected a mapping node, but found %s" % node.id, + f"expected a mapping node, but found {node.id}", node.start_mark, ) mapping = {} @@ -353,7 +353,7 @@ def genCppH( shader_info = getShaderInfo(srcPath) tile_size = ( - "{{{}}}".format(", ".join(str(x) for x in shader_info.tile_size)) + f"{{{', '.join(str(x) for x in shader_info.tile_size)}}}" if 
(len(shader_info.tile_size) > 0) else "std::vector<uint32_t>()" ) @@ -385,11 +385,7 @@ def genCppH( for registry_key in registry_keys: shader_info_registry_code.append( textwrap.indent( - "{{\"{}\", {{{{\"{}\", \"{}\"}}}}}}".format( - op_name, - registry_key, - name, - ), + f"{{\"{op_name}\", {{{{\"{registry_key}\", \"{name}\"}}}}}}", " ", ), ) diff --git a/tools/linter/adapters/flake8_linter.py b/tools/linter/adapters/flake8_linter.py index 20c114f19c03b..20c9c7cea316e 100644 --- a/tools/linter/adapters/flake8_linter.py +++ b/tools/linter/adapters/flake8_linter.py @@ -289,10 +289,7 @@ def check_files( LintMessage( path=match["file"], name=match["code"], - description="{}\nSee {}".format( - match["message"], - get_issue_documentation_url(match["code"]), - ), + description=f"{match['message']}\nSee {get_issue_documentation_url(match['code'])}", line=int(match["line"]), char=int(match["column"]) if match["column"] is not None and not match["column"].startswith("-") diff --git a/tools/linter/adapters/s3_init.py b/tools/linter/adapters/s3_init.py index c3d6e8e03c049..7befbf27f9016 100644 --- a/tools/linter/adapters/s3_init.py +++ b/tools/linter/adapters/s3_init.py @@ -59,7 +59,7 @@ def report_download_progress( if file_size != -1: percent = min(1, (chunk_number * chunk_size) / file_size) bar = "#" * int(64 * percent) - sys.stdout.write("\r0% |{:<64}| {}%".format(bar, int(percent * 100))) + sys.stdout.write(f"\r0% |{bar:<64}| {int(percent * 100)}%") def check(binary_path: Path, reference_hash: str) -> bool: diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py index 57f5248ba84d5..262eee5390a3f 100644 --- a/tools/pyi/gen_pyi.py +++ b/tools/pyi/gen_pyi.py @@ -185,9 +185,7 @@ def sig_for_ops(opname: str) -> List[str]: # we have to do this by hand, because they are hand-bound in Python - assert opname.endswith("__") and opname.startswith("__"), "Unexpected op {}".format( - opname - ) + assert opname.endswith("__") and opname.startswith("__"), f"Unexpected op {opname}" name = opname[2:-2] if name in binary_ops: @@ -370,9 +368,7 @@ def gen_nn_functional(fm: FileManager) -> None: ) ], "leaky_relu_": [ - "def leaky_relu_({}) -> Tensor: ...".format( - ", ".join(["input: Tensor", "negative_slope: float = ..."]) - ) + f"def leaky_relu_({', '.join(['input: Tensor', 'negative_slope: float = ...'])}) -> Tensor: ..." ], "log_sigmoid": ["def log_sigmoid(input: Tensor) -> Tensor: ..."], "gelu": ["def gelu(input: Tensor, approximate: str = ...) -> Tensor: ..."], @@ -387,9 +383,7 @@ def gen_nn_functional(fm: FileManager) -> None: "def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ..." ], "hardsigmoid": [ - "def hardsigmoid({}) -> Tensor: ...".format( - ", ".join(["input: Tensor", "*", "out: Optional[Tensor] = None"]) - ) + f"def hardsigmoid({', '.join(['input: Tensor', '*', 'out: Optional[Tensor] = None'])}) -> Tensor: ..." 
], "linear": [ "def linear({}) -> Tensor: ...".format( diff --git a/tools/test/gen_operators_yaml_test.py b/tools/test/gen_operators_yaml_test.py index 87455d3a13ff5..3c57b2a4748fc 100644 --- a/tools/test/gen_operators_yaml_test.py +++ b/tools/test/gen_operators_yaml_test.py @@ -61,9 +61,7 @@ def test_filter_creation(self): filtered_configs = list(filter(filter_func, config)) assert ( len(filtered_configs) == 2 - ), "Expected 2 elements in filtered_configs, but got {}".format( - len(filtered_configs) - ) + ), f"Expected 2 elements in filtered_configs, but got {len(filtered_configs)}" def test_verification_success(self): filter_func = make_filter_from_options( diff --git a/tools/testing/modulefinder_determinator.py b/tools/testing/modulefinder_determinator.py index 116517091b017..ce55fdb4245ab 100644 --- a/tools/testing/modulefinder_determinator.py +++ b/tools/testing/modulefinder_determinator.py @@ -133,11 +133,7 @@ def test_impact_of_file(filename: str) -> str: def log_test_reason(file_type: str, filename: str, test: str, options: Any) -> None: if options.verbose: print_to_stderr( - "Determination found {} file {} -- running {}".format( - file_type, - filename, - test, - ) + f"Determination found {file_type} file {filename} -- running {test}" ) diff --git a/torch/_appdirs.py b/torch/_appdirs.py index 9395d9fb9f5c7..46d4c599f2a67 100644 --- a/torch/_appdirs.py +++ b/torch/_appdirs.py @@ -643,24 +643,24 @@ def _get_win_folder_with_jna(csidl_name): "site_config_dir", ) - print("-- app dirs %s --" % __version__) + print(f"-- app dirs {__version__} --") print("-- app dirs (with optional 'version')") dirs = AppDirs(appname, appauthor, version="1.0") for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) + print(f"{prop}: {getattr(dirs, prop)}") print("\n-- app dirs (without optional 'version')") dirs = AppDirs(appname, appauthor) for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) + print(f"{prop}: {getattr(dirs, prop)}") print("\n-- app dirs (without optional 'appauthor')") dirs = AppDirs(appname) for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) + print(f"{prop}: {getattr(dirs, prop)}") print("\n-- app dirs (with disabled 'appauthor')") dirs = AppDirs(appname, appauthor=False) for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) + print(f"{prop}: {getattr(dirs, prop)}") diff --git a/torch/_export/verifier.py b/torch/_export/verifier.py index 73906bb6a0d7a..4f35bb4888a28 100644 --- a/torch/_export/verifier.py +++ b/torch/_export/verifier.py @@ -132,9 +132,7 @@ def check_valid_op(self, op) -> None: # NOTE(qihan): whether view_copy operators are marked as canonical is still under # discussion. raise SpecViolationError( - "Operator {}.{} is not Aten Canonical.".format( - op.__module__, op.__name__ - ) + f"Operator {op.__module__}.{op.__name__} is not Aten Canonical." 
) @compatibility(is_backward_compatible=False) diff --git a/torch/_lobpcg.py b/torch/_lobpcg.py index aaed2d951b2ff..16f37cdf6a4a8 100644 --- a/torch/_lobpcg.py +++ b/torch/_lobpcg.py @@ -796,10 +796,9 @@ def update_converged_count(self): # strict ordering of eigenpairs break count += 1 - assert count >= prev_count, ( - "the number of converged eigenpairs " - "(was {}, got {}) cannot decrease".format(prev_count, count) - ) + assert ( + count >= prev_count + ), f"the number of converged eigenpairs (was {prev_count}, got {count}) cannot decrease" self.ivars["converged_count"] = count self.tvars["rerr"] = rerr return count diff --git a/torch/_lowrank.py b/torch/_lowrank.py index c6dedc00a1f8a..fe5a1f3da71d0 100644 --- a/torch/_lowrank.py +++ b/torch/_lowrank.py @@ -263,8 +263,7 @@ def pca_lowrank( q = min(6, m, n) elif not (q >= 0 and q <= min(m, n)): raise ValueError( - "q(={}) must be non-negative integer" - " and not greater than min(m, n)={}".format(q, min(m, n)) + f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}" ) if not (niter >= 0): raise ValueError(f"niter(={niter}) must be non-negative integer") diff --git a/torch/_namedtensor_internals.py b/torch/_namedtensor_internals.py index 050be84a1b713..47bdcd82d14ab 100644 --- a/torch/_namedtensor_internals.py +++ b/torch/_namedtensor_internals.py @@ -28,9 +28,7 @@ def unzip_namedshape(namedshape): namedshape = namedshape.items() if not hasattr(namedshape, "__iter__") and not isinstance(namedshape, tuple): raise RuntimeError( - "Expected namedshape to be OrderedDict or iterable of tuples, got: {}".format( - type(namedshape) - ) + f"Expected namedshape to be OrderedDict or iterable of tuples, got: {type(namedshape)}" ) if len(namedshape) == 0: raise RuntimeError("Expected namedshape to non-empty.") diff --git a/torch/_ops.py b/torch/_ops.py index 5903477b9103d..329d8fa534a95 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -403,9 +403,7 @@ def __init__(self, overloadpacket, op, op_dk, schema, tags): self._name = self._schema.name if schema.overload_name: self._name += "." + schema.overload_name - self.__name__ = "{}.{}".format( - self._schema.name.split("::")[1], self._overloadname - ) + self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}" self.__module__ = overloadpacket.__module__ op.__module__ = overloadpacket.__module__ self.__qualname__ = self._name @@ -669,9 +667,7 @@ def __getattr__(self, key): return overload except RuntimeError: raise AttributeError( - "The underlying op of '{}' has no overload name '{}'".format( - str(self), key - ) + f"The underlying op of '{str(self)}' has no overload name '{key}'" ) from None def __iter__(self): diff --git a/torch/_prims/__init__.py b/torch/_prims/__init__.py index ac447dab410a0..ae105e0e8a16e 100644 --- a/torch/_prims/__init__.py +++ b/torch/_prims/__init__.py @@ -1463,28 +1463,20 @@ def _slice_meta( _strides = strides if strides is not None else [1] * len(start_indices) if a.ndim != len(start_indices): - msg = "Attempting to slice tensor of rank {} with start_indices of length {}!".format( - a.ndim, len(start_indices) - ) + msg = f"Attempting to slice tensor of rank {a.ndim} with start_indices of length {len(start_indices)}!" raise ValueError(msg) if a.ndim != len(limit_indices): - msg = "Attempting to slice tensor of rank {} with limit_indices of length {}!".format( - a.ndim, len(limit_indices) - ) + msg = f"Attempting to slice tensor of rank {a.ndim} with limit_indices of length {len(limit_indices)}!" 
raise ValueError(msg) if a.ndim != len(_strides): - msg = "Attempting to slice tensor of rank {} with strides of length {}!".format( - a.ndim, len(limit_indices) - ) + msg = f"Attempting to slice tensor of rank {a.ndim} with strides of length {len(_strides)}!" raise ValueError(msg) for x, y in zip(start_indices, a.shape): if x < 0: - msg = "Attempting to slice a tensor with a negative start index of {}!".format( - x - ) + msg = f"Attempting to slice a tensor with a negative start index of {x}!" raise ValueError(msg) if x > y: msg = ( @@ -1497,11 +1489,7 @@ def _slice_meta( for x, y, z in zip(limit_indices, a.shape, start_indices): if x < 0: - msg = ( - "Attempting to slice a tensor with a negative stop index of {}!".format( - x - ) - ) + msg = f"Attempting to slice a tensor with a negative stop index of {x}!" raise ValueError(msg) if x > y: msg = ( @@ -1519,9 +1507,7 @@ def _slice_meta( for x in _strides: if x <= 0: - msg = "Attempting to slice a tensor with a non-positive step of {}!".format( - x - ) + msg = f"Attempting to slice a tensor with a non-positive step of {x}!" raise ValueError(msg) new_shape = [] @@ -1582,9 +1568,7 @@ def _slice_in_dim_meta( msg = f"slice_in_dim: received a negative axis {axis}" raise ValueError(msg) if axis >= a.ndim: - msg = "slice_in_dim: axis {} is greater or equal to the rank {} of the tensor".format( - axis, a.ndim - ) + msg = f"slice_in_dim: axis {axis} is greater or equal to the rank {a.ndim} of the tensor" raise ValueError(msg) if start_index < 0: @@ -1592,21 +1576,15 @@ raise ValueError(msg) if start_index > a.shape[axis]: - msg = "slice_in_dim: start_index is greater than the length {} of dimension {}".format( - start_index, axis - ) + msg = f"slice_in_dim: start_index is greater than the length {a.shape[axis]} of dimension {axis}" raise ValueError(msg) if limit_index > a.shape[axis]: - msg = "slice_in_dim: limit_index is greater than the length {} of dimension {}".format( - limit_index, axis - ) + msg = f"slice_in_dim: limit_index is greater than the length {a.shape[axis]} of dimension {axis}" raise ValueError(msg) if limit_index < start_index: - msg = "slice_in_dim: received a limit_index {} less than the start_index {}".format( - limit_index, start_index - ) + msg = f"slice_in_dim: received a limit_index {limit_index} less than the start_index {start_index}" raise ValueError(msg) if stride < 0: @@ -1936,9 +1914,7 @@ def _reshape_meta(a: TensorLikeType, shape: ShapeType): # same number of elements numel = reduce(operator.mul, shape) if numel != a.numel(): - msg = "Attempting to reshape a tensor with {} elements to a shape with {} elements!".format( - a.numel(), numel - ) + msg = f"Attempting to reshape a tensor with {a.numel()} elements to a shape with {numel} elements!" raise ValueError(msg) return TensorMeta(a, shape=shape, strides=utils.make_contiguous_strides_for(shape)) @@ -2188,9 +2164,7 @@ def _copy_to_meta(a: TensorLikeType, b: TensorLikeType): # Validates the tensors have the same number of elements if a.numel() != b.numel(): - msg = "Attempting to copy {} elements to a tensor with {} elements!".format( - b.numel(), a.numel() - ) + msg = f"Attempting to copy {b.numel()} elements to a tensor with {a.numel()} elements!" 
raise RuntimeError(msg) return a diff --git a/torch/_prims/executor.py b/torch/_prims/executor.py index 325ac67a665cc..cbcfabab28be1 100644 --- a/torch/_prims/executor.py +++ b/torch/_prims/executor.py @@ -28,9 +28,7 @@ def execute( elif executor == "strictly_nvfuser": return nvfuser_execute(gm, *args, executor_parameters=executor_parameters) - msg = "Received unexpected value for 'executor': {}. Allowed values are: aten, nvfuser.".format( - executor - ) + msg = f"Received unexpected value for 'executor': {executor}. Allowed values are: aten, nvfuser." raise ValueError(msg) diff --git a/torch/_prims/nvfuser_executor.py b/torch/_prims/nvfuser_executor.py index 445a2653736bc..66f61b096ec81 100644 --- a/torch/_prims/nvfuser_executor.py +++ b/torch/_prims/nvfuser_executor.py @@ -476,9 +476,7 @@ def maybe_partition_graph( class NVTXInterpreter(torch.fx.Interpreter): def run_node(self, n): torch.cuda.nvtx.range_push( - "name: {}, args: {}, op: {}, kwargs: {}".format( - n.name, n.args, n.op, n.kwargs - ) + f"name: {n.name}, args: {n.args}, op: {n.op}, kwargs: {n.kwargs}" ) result = super().run_node(n) torch.cuda.nvtx.range_pop() diff --git a/torch/_prims_common/__init__.py b/torch/_prims_common/__init__.py index 4800966f3e2ff..67f82934b2349 100644 --- a/torch/_prims_common/__init__.py +++ b/torch/_prims_common/__init__.py @@ -143,17 +143,13 @@ def compare_tensor_meta(a: TensorLikeType, b: TensorLikeType, check_strides=Fals same_strides, idx = check_significant_strides(a, b) if not same_strides: msg = ( - "Stride mismatch! Strides are {} and {} (mismatched at {})!".format( - a.stride(), b.stride(), idx - ) + f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!" ) raise RuntimeError(msg) if a.storage_offset() != b.storage_offset(): msg = ( - "Storage offset mismatch! Storage offsets are {} and {}!".format( - a.storage_offset(), b.storage_offset() - ) + f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!" ) raise RuntimeError(msg) @@ -584,9 +580,7 @@ def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int: if _idx < 0 or _idx >= rank: # Same error message as in aten/src/ATen/WrapDimUtils.h:49 - msg = "Dimension out of range (expected to be in range of [{}, {}], but got {})".format( - -rank, rank - 1, idx - ) + msg = f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})" raise IndexError(msg) return _idx @@ -710,9 +704,7 @@ def check_same_shape(*args, allow_cpu_scalar_tensors: bool): shape = arg.shape if not is_same_shape(shape, arg.shape): - msg = "Shape {} is not the expected shape {}!".format( - arg.shape, shape - ) + msg = f"Shape {arg.shape} is not the expected shape {shape}!" raise RuntimeError(msg) else: msg = ( @@ -1340,9 +1332,7 @@ def elementwise_dtypes( for x in args: if not isinstance(x, (Number, TensorLike, sympy.Symbol)): msg = ( - "Unexpected type {} when computing elementwise type promotion!".format( - str(type(x)) - ) + f"Unexpected type {str(type(x))} when computing elementwise type promotion!" ) raise ValueError(msg) diff --git a/torch/_prims_common/wrappers.py b/torch/_prims_common/wrappers.py index c9755de3e0da6..c696bb2085c4b 100644 --- a/torch/_prims_common/wrappers.py +++ b/torch/_prims_common/wrappers.py @@ -57,9 +57,7 @@ def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType: msg = f"Found unknown type {type(a)} when trying to convert scalars!" 
raise ValueError(msg) if not utils.is_weakly_lesser_type(type(a), typ): - msg = "Scalar {} of type {} cannot be safely cast to type {}!".format( - a, type(a), typ - ) + msg = f"Scalar {a} of type {type(a)} cannot be safely cast to type {typ}!" raise ValueError(msg) return typ(a) diff --git a/torch/_tensor.py b/torch/_tensor.py index b78c1a4ecb3a7..b61bbae5c14f8 100644 --- a/torch/_tensor.py +++ b/torch/_tensor.py @@ -336,7 +336,7 @@ def _reduce_ex_internal(self, proto): ) else: raise NotImplementedError( - "sparse tensor __reduce_ex__ for layout `%s`" % (self.layout) + f"sparse tensor __reduce_ex__ for layout `{self.layout}`" ) return (torch._utils._rebuild_sparse_tensor, args_sparse) elif self.layout in { @@ -1002,8 +1002,7 @@ def __contains__(self, element): return (element == self).any().item() # type: ignore[union-attr] raise RuntimeError( - "Tensor.__contains__ only supports Tensor or scalar, but you passed in a %s." - % type(element) + f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}." ) @property diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py index 2b2f029918dfa..f18c3decf9d54 100644 --- a/torch/_tensor_str.py +++ b/torch/_tensor_str.py @@ -192,11 +192,7 @@ def width(self): def format(self, value): if self.floating_dtype: if self.sci_mode: - ret = ( - ("{{:{}.{}e}}") - .format(self.max_width, PRINT_OPTS.precision) - .format(value) - ) + ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value) elif self.int_mode: ret = f"{value:.0f}" if not (math.isinf(value) or math.isnan(value)): diff --git a/torch/_utils.py b/torch/_utils.py index 5e88a98e75ed9..bcb2c3cdad60c 100644 --- a/torch/_utils.py +++ b/torch/_utils.py @@ -253,7 +253,7 @@ def _validate_loaded_sparse_tensors(): ) else: raise NotImplementedError( - "_validate_loaded_sparse_tensors for layout `%s`" % (t.layout) + f"_validate_loaded_sparse_tensors for layout `{t.layout}`" ) finally: @@ -299,7 +299,7 @@ def _rebuild_sparse_tensor(layout, data): _sparse_tensors_to_validate.append(result) return result - raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout)) + raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}") def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad): @@ -674,9 +674,7 @@ def reraise(self): r"""Reraises the wrapped exception in the current thread""" # Format a message such as: "Caught ValueError in DataLoader worker # process 2. Original Traceback:", followed by the traceback. - msg = "Caught {} {}.\nOriginal {}".format( - self.exc_type.__name__, self.where, self.exc_msg - ) + msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}" if self.exc_type == KeyError: # KeyError calls repr() on its argument (usually a dict key). This # makes stack traces unreadable. It will not be changed in Python @@ -786,8 +784,7 @@ def _get_device_index( device_idx = _get_current_device_index() else: raise ValueError( - "Expected a torch.device with a specified index " - "or an integer, but got:{}".format(device) + f"Expected a torch.device with a specified index or an integer, but got:{device}" ) return device_idx diff --git a/torch/autograd/_functions/utils.py b/torch/autograd/_functions/utils.py index 2ff7169f25251..89e88e4af39ad 100644 --- a/torch/autograd/_functions/utils.py +++ b/torch/autograd/_functions/utils.py @@ -51,6 +51,5 @@ def check_onnx_broadcast(dims1, dims2): supported = False if not supported: - raise ValueError("Numpy style broadcasting is not supported in ONNX. 
" - "Input dims are: {}, {}".format(dims1, dims2)) + raise ValueError(f"Numpy style broadcasting is not supported in ONNX. Input dims are: {dims1}, {dims2}") return broadcast diff --git a/torch/autograd/functional.py b/torch/autograd/functional.py index c69ad4aaf69d8..109da01fb7557 100644 --- a/torch/autograd/functional.py +++ b/torch/autograd/functional.py @@ -107,8 +107,7 @@ def _validate_v(v, other, is_other_tuple): prepend = "" if is_other_tuple: prepend = f"Entry {idx} in " - raise RuntimeError("{}v has invalid size: should be {} but got {}.".format( - prepend, el_other.size(), el_v.size())) + raise RuntimeError(f"{prepend}v has invalid size: should be {el_other.size()} but got {el_v.size()}.") def _check_requires_grad(inputs, input_type, strict): diff --git a/torch/autograd/profiler_legacy.py b/torch/autograd/profiler_legacy.py index 1f71c61d51d2d..0f535f91f1283 100644 --- a/torch/autograd/profiler_legacy.py +++ b/torch/autograd/profiler_legacy.py @@ -216,10 +216,8 @@ def _get_record_key(record): elif record.kind() == 'pop': assert ( record_key in range_starts - ), """Expected record with key {} to exist in range_starts. - This means that the pop event did not have a corresponding push.""".format( - record_key - ) + ), f"""Expected record with key {record_key} to exist in range_starts. + This means that the pop event did not have a corresponding push.""" start = range_starts[record_key] diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py index 59468cf62b3ea..b7370fc96ade9 100644 --- a/torch/autograd/profiler_util.py +++ b/torch/autograd/profiler_util.py @@ -110,9 +110,7 @@ def _populate_cpu_children(self): parent.append_cpu_child(event) assert ( event.cpu_parent is None - ), "There is already a CPU parent event for {}".format( - event.key - ) + ), f"There is already a CPU parent event for {event.key}" event.set_cpu_parent(parent) break @@ -328,7 +326,7 @@ def _format_time_share(time_us, total_time_us): if total_time_us == 0: assert time_us == 0, f"Expected time_us == 0 but got {time_us}" return "NaN" - return '{:.2f}%'.format(time_us * 100.0 / total_time_us) + return f'{time_us * 100.0 / total_time_us:.2f}%' def _format_memory(nbytes): """Returns a formatted memory size string""" @@ -336,11 +334,11 @@ def _format_memory(nbytes): MB = 1024 * KB GB = 1024 * MB if (abs(nbytes) >= GB): - return '{:.2f} Gb'.format(nbytes * 1.0 / GB) + return f'{nbytes * 1.0 / GB:.2f} Gb' elif (abs(nbytes) >= MB): - return '{:.2f} Mb'.format(nbytes * 1.0 / MB) + return f'{nbytes * 1.0 / MB:.2f} Mb' elif (abs(nbytes) >= KB): - return '{:.2f} Kb'.format(nbytes * 1.0 / KB) + return f'{nbytes * 1.0 / KB:.2f} Kb' else: return str(nbytes) + ' b' @@ -906,7 +904,7 @@ def trim_path(path, src_column_width): if evt.flops <= 0: row_values.append("--") else: - row_values.append('{:8.3f}'.format(evt.flops * flops_scale)) + row_values.append(f'{evt.flops * flops_scale:8.3f}') if has_stack: src_field = "" if len(evt.stack) > 0: diff --git a/torch/backends/_nnapi/serializer.py b/torch/backends/_nnapi/serializer.py index 933b2a97144f3..b569e14196ab3 100644 --- a/torch/backends/_nnapi/serializer.py +++ b/torch/backends/_nnapi/serializer.py @@ -351,7 +351,7 @@ def get_next_operand_id(self): def add_tensor_operand(self, jitval, oper): assert isinstance(oper, Operand) if jitval in self.jitval_operand_map: - raise Exception("Duplicate tensor: %r" % jitval) + raise Exception(f"Duplicate tensor: {jitval!r}") operand_id = self.get_next_operand_id() self.operands.append(oper) @@ -918,8 +918,7 @@ def 
add_list_construct(self, node): self.add_tensor_sequence(output, tensors) if const_vals is None and tensors is None: raise Exception( - "Unable to handle ListConstruct node." - " Neither all constants nor all tensors. %r" % node) + f"Unable to handle ListConstruct node. Neither all constants nor all tensors. {node!r}") def add_tuple_construct(self, node): assert node.outputsSize() == 1 @@ -1974,8 +1973,7 @@ def add_conv2d_common( assert bias_oper.zero_point == 0 else: raise Exception( - "Unsupported input type for conv2d: {}" - .format(image_oper.op_type)) + f"Unsupported input type for conv2d: {image_oper.op_type}") assert len(image_oper.shape) == 4 assert len(weight_oper.shape) == 4 diff --git a/torch/cuda/memory.py b/torch/cuda/memory.py index 61b03deb83ef6..04f4096c2dea8 100644 --- a/torch/cuda/memory.py +++ b/torch/cuda/memory.py @@ -116,8 +116,7 @@ def set_per_process_memory_fraction(fraction, device: Union[Device, int] = None) if not isinstance(fraction, float): raise TypeError('Invalid type for fraction argument, must be `float`') if fraction < 0 or fraction > 1: - raise ValueError('Invalid fraction value: {}. ' - 'Allowed range: 0~1'.format(fraction)) + raise ValueError(f'Invalid fraction value: {fraction}. Allowed range: 0~1') torch._C._cuda_setMemoryFraction(fraction, device) diff --git a/torch/cuda/streams.py b/torch/cuda/streams.py index 8e5406a10a02d..bcb3e1faf40b3 100644 --- a/torch/cuda/streams.py +++ b/torch/cuda/streams.py @@ -108,8 +108,7 @@ def __hash__(self): return hash((self.cuda_stream, self.device)) def __repr__(self): - return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>' - .format(self.device, self.cuda_stream)) + return (f'<torch.cuda.Stream device={self.device} cuda_stream={self.cuda_stream:#x}>') class ExternalStream(Stream): diff --git a/torch/distributions/constraint_registry.py b/torch/distributions/constraint_registry.py index 0207f88c9b19f..88497fcfbce6d 100644 --- a/torch/distributions/constraint_registry.py +++ b/torch/distributions/constraint_registry.py @@ -110,8 +110,7 @@ def construct_transform(constraint): constraint = type(constraint) if not isinstance(constraint, type) or not issubclass(constraint, constraints.Constraint): - raise TypeError('Expected constraint to be either a Constraint subclass or instance, ' - 'but got {}'.format(constraint)) + raise TypeError(f'Expected constraint to be either a Constraint subclass or instance, but got {constraint}') self._registry[constraint] = factory return factory diff --git a/torch/distributions/constraints.py b/torch/distributions/constraints.py index 5f284959beb37..28d61a5de8ccc 100644 --- a/torch/distributions/constraints.py +++ b/torch/distributions/constraints.py @@ -215,8 +215,7 @@ def check(self, value): return result def __repr__(self): - return "{}({}, {})".format(self.__class__.__name__[1:], repr(self.base_constraint), - self.reinterpreted_batch_ndims) + return f"{self.__class__.__name__[1:]}({repr(self.base_constraint)}, {self.reinterpreted_batch_ndims})" class _Boolean(Constraint): diff --git a/torch/distributions/continuous_bernoulli.py b/torch/distributions/continuous_bernoulli.py index 8e641265f2d0f..d140485669354 100644 --- a/torch/distributions/continuous_bernoulli.py +++ b/torch/distributions/continuous_bernoulli.py @@ -51,7 +51,7 @@ def __init__(self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=N # close to 0 and 1, later on; otherwise the clamped 'probs' would always pass if validate_args is not None: if not self.arg_constraints['probs'].check(self.probs).all(): - raise ValueError("The parameter {} has invalid values".format('probs')) + raise ValueError("The parameter probs has
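The restored Stream.__repr__ above carries the {...:#x} spec over from the old positional form; the # flag asks for the 0x prefix on the hexadecimal rendering. A small sketch with a stand-in integer handle (the real attribute is a CUDA stream pointer):

    cuda_stream = 94350810169344   # stand-in handle, not a real stream
    print(f"<torch.cuda.Stream device=cuda:0 cuda_stream={cuda_stream:#x}>")
    print(f"{cuda_stream:x}")      # same digits without the 0x prefix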
invalid values") self.probs = clamp_probs(self.probs) else: is_scalar = isinstance(logits, Number) diff --git a/torch/distributions/kl.py b/torch/distributions/kl.py index 4eda85ef75b68..7b2ce5b58ecd3 100644 --- a/torch/distributions/kl.py +++ b/torch/distributions/kl.py @@ -820,8 +820,7 @@ def _add_kl_info(): rows = ["KL divergence is currently implemented for the following distribution pairs:"] for p, q in sorted(_KL_REGISTRY, key=lambda p_q: (p_q[0].__name__, p_q[1].__name__)): - rows.append("* :class:`~torch.distributions.{}` and :class:`~torch.distributions.{}`" - .format(p.__name__, q.__name__)) + rows.append(f"* :class:`~torch.distributions.{p.__name__}` and :class:`~torch.distributions.{q.__name__}`") kl_info = '\n\t'.join(rows) if kl_divergence.__doc__: kl_divergence.__doc__ += kl_info # type: ignore[operator] diff --git a/torch/distributions/lowrank_multivariate_normal.py b/torch/distributions/lowrank_multivariate_normal.py index 5ca125a92dd00..7ba920e970bc5 100644 --- a/torch/distributions/lowrank_multivariate_normal.py +++ b/torch/distributions/lowrank_multivariate_normal.py @@ -90,8 +90,7 @@ def __init__(self, loc, cov_factor, cov_diag, validate_args=None): raise ValueError("cov_factor must be at least two-dimensional, " "with optional leading batch dimensions") if cov_factor.shape[-2:-1] != event_shape: - raise ValueError("cov_factor must be a batch of matrices with shape {} x m" - .format(event_shape[0])) + raise ValueError(f"cov_factor must be a batch of matrices with shape {event_shape[0]} x m") if cov_diag.shape[-1:] != event_shape: raise ValueError(f"cov_diag must be a batch of vectors with shape {event_shape}") diff --git a/torch/distributions/mixture_same_family.py b/torch/distributions/mixture_same_family.py index 65c18e1a46370..f60ad4b5419c8 100644 --- a/torch/distributions/mixture_same_family.py +++ b/torch/distributions/mixture_same_family.py @@ -189,6 +189,5 @@ def _pad_mixture_dimensions(self, x): return x def __repr__(self): - args_string = '\n {},\n {}'.format(self.mixture_distribution, - self.component_distribution) + args_string = f'\n {self.mixture_distribution},\n {self.component_distribution}' return 'MixtureSameFamily' + '(' + args_string + ')' diff --git a/torch/functional.py b/torch/functional.py index b6aa56dd9cb11..0dc84cc45c2f5 100644 --- a/torch/functional.py +++ b/torch/functional.py @@ -117,8 +117,7 @@ def broadcast_shapes(*shapes): if isinstance(shape, (tuple, list)): for i in range(-1, -1 - len(shape), -1): if shape[i] < 0: - raise RuntimeError("Trying to create tensor with negative dimension ({}): ({})" - .format(shape[i], shape[i])) + raise RuntimeError(f"Trying to create tensor with negative dimension ({shape[i]}): ({shape[i]})") if shape[i] == 1 or shape[i] == result[i]: continue if result[i] != 1: diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py index 4ffac7b3c379f..9f95c0390098d 100644 --- a/torch/fx/experimental/symbolic_shapes.py +++ b/torch/fx/experimental/symbolic_shapes.py @@ -1887,7 +1887,7 @@ def print_results(grouped, indent, result_fn): class ShapeEnvLoggerAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): # TODO: Maybe suppress the envid if not DEBUG? 
- return '{}: {}'.format(self.extra['envid'], msg), kwargs + return f"{self.extra['envid']}: {msg}", kwargs ENV_COUNTER = collections.Counter() diff --git a/torch/fx/experimental/unification/multipledispatch/dispatcher.py b/torch/fx/experimental/unification/multipledispatch/dispatcher.py index ac8bc7d8dd159..65ad70a3e1abb 100644 --- a/torch/fx/experimental/unification/multipledispatch/dispatcher.py +++ b/torch/fx/experimental/unification/multipledispatch/dispatcher.py @@ -276,7 +276,7 @@ def __call__(self, *args, **kwargs): self.name, str_signature(types),),) from e def __str__(self): - return "<dispatched %s>" % self.name + return f"<dispatched {self.name}>" __repr__ = __str__ def dispatch(self, *types): @@ -339,7 +339,7 @@ def __setstate__(self, d): @property def __doc__(self): - docs = ["Multiply dispatched method: %s" % self.name] + docs = [f"Multiply dispatched method: {self.name}"] if self.doc: docs.append(self.doc) @@ -348,7 +348,7 @@ def __doc__(self): for sig in self.ordering[::-1]: func = self.funcs[sig] if func.__doc__: - s = 'Inputs: <%s>\n' % str_signature(sig) + s = f'Inputs: <{str_signature(sig)}>\n' s += '-' * len(s) + '\n' s += func.__doc__.strip() docs.append(s) @@ -379,7 +379,7 @@ def source(self, *args, **kwargs): def source(func): - s = 'File: %s\n\n' % inspect.getsourcefile(func) + s = f'File: {inspect.getsourcefile(func)}\n\n' s = s + inspect.getsource(func) return s @@ -420,12 +420,12 @@ def str_signature(sig): def warning_text(name, amb): """ The text for ambiguity warnings """ - text = "\nAmbiguities exist in dispatched function %s\n\n" % (name) + text = f"\nAmbiguities exist in dispatched function {name}\n\n" text += "The following signatures may result in ambiguous behavior:\n" for pair in amb: text += "\t" + \ ', '.join('[' + str_signature(s) + ']' for s in pair) + "\n" text += "\n\nConsider making the following additions:\n\n" text += '\n\n'.join(['@dispatch(' + str_signature(super_signature(s)) - + ')\ndef %s(...)' % name for s in amb]) + + f')\ndef {name}(...)' for s in amb]) return text diff --git a/torch/fx/experimental/unification/multipledispatch/utils.py b/torch/fx/experimental/unification/multipledispatch/utils.py index b7b9b7b913772..4b5ec2ed63152 100644 --- a/torch/fx/experimental/unification/multipledispatch/utils.py +++ b/torch/fx/experimental/unification/multipledispatch/utils.py @@ -122,4 +122,4 @@ def typename(type): except AttributeError: if len(type) == 1: return typename(*type) - return '(%s)' % ', '.join(map(typename, type)) + return f"({', '.join(map(typename, type))})" diff --git a/torch/fx/experimental/unification/multipledispatch/variadic.py b/torch/fx/experimental/unification/multipledispatch/variadic.py index 6d50ff6a65e81..0f046ba55bd32 100644 --- a/torch/fx/experimental/unification/multipledispatch/variadic.py +++ b/torch/fx/experimental/unification/multipledispatch/variadic.py @@ -64,7 +64,7 @@ def __getitem__(cls, variadic_type): if not isinstance(variadic_type, tuple): variadic_type = variadic_type, return VariadicSignatureType( - 'Variadic[%s]' % typename(variadic_type), + f'Variadic[{typename(variadic_type)}]', (), dict(variadic_type=variadic_type, __slots__=()) ) diff --git a/torch/fx/experimental/unification/unification_tools.py b/torch/fx/experimental/unification/unification_tools.py index f3df1f0766ace..ae159b937ec07 100644 --- a/torch/fx/experimental/unification/unification_tools.py +++ b/torch/fx/experimental/unification/unification_tools.py @@ -11,8 +11,7 @@ def _get_factory(f, kwargs): factory = kwargs.pop('factory', dict) if kwargs: - raise TypeError("{}()
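The ShapeEnvLoggerAdapter hunk at the top of this run switches the outer quotes to double quotes because, before Python 3.12, an f-string expression may not reuse the string's own quote character. A toy reproduction (extra and msg are stand-ins for the adapter's state):

    extra = {"envid": 3}
    msg = "created symbol s0"
    print(f"{extra['envid']}: {msg}")   # fine on any Python with f-strings
    # f'{extra['envid']}: {msg}'       # SyntaxError before Python 3.12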
got an unexpected keyword argument " - "'{}'".format(f.__name__, kwargs.popitem()[0])) + raise TypeError(f"{f.__name__}() got an unexpected keyword argument '{kwargs.popitem()[0]}'") return factory diff --git a/torch/fx/experimental/unification/variable.py b/torch/fx/experimental/unification/variable.py index d918ec3b6ab47..8f7efda3328b0 100644 --- a/torch/fx/experimental/unification/variable.py +++ b/torch/fx/experimental/unification/variable.py @@ -13,7 +13,7 @@ class Var: def __new__(cls, *token): if len(token) == 0: - token = "_%s" % Var._id # type: ignore[assignment] + token = f"_{Var._id}" # type: ignore[assignment] Var._id += 1 elif len(token) == 1: token = token[0] diff --git a/torch/hub.py b/torch/hub.py index 2b4c20cfa04ff..5792976eaff4b 100644 --- a/torch/hub.py +++ b/torch/hub.py @@ -34,7 +34,7 @@ def update(self, n): if self.total is None: sys.stderr.write(f"\r{self.n:.1f} bytes") else: - sys.stderr.write("\r{:.1f}%".format(100 * self.n / float(self.total))) + sys.stderr.write(f"\r{100 * self.n / float(self.total):.1f}%") sys.stderr.flush() # Don't bother implementing; use real tqdm if you want @@ -328,7 +328,7 @@ def _check_dependencies(m): if dependencies is not None: missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)] if len(missing_deps): - raise RuntimeError('Missing dependencies: {}'.format(', '.join(missing_deps))) + raise RuntimeError(f"Missing dependencies: {', '.join(missing_deps)}") def _load_entry_from_hubconf(m, model): @@ -650,8 +650,7 @@ def download_url_to_file(url: str, dst: str, hash_prefix: Optional[str] = None, if hash_prefix is not None: digest = sha256.hexdigest() if digest[:len(hash_prefix)] != hash_prefix: - raise RuntimeError('invalid hash value (expected "{}", got "{}")' - .format(hash_prefix, digest)) + raise RuntimeError(f'invalid hash value (expected "{hash_prefix}", got "{digest}")') shutil.move(f.name, dst) finally: f.close() diff --git a/torch/jit/_recursive.py b/torch/jit/_recursive.py index d40254fc45d17..ed020332fc559 100644 --- a/torch/jit/_recursive.py +++ b/torch/jit/_recursive.py @@ -703,8 +703,7 @@ def make_stubs_for_overloads(overload_info): def check_module_initialized(mod): assert isinstance(mod, torch.nn.Module) if not hasattr(mod, '_parameters'): - raise RuntimeError("'{}' has not been initialized, did you forget to call 'super()'?" - .format(torch.typename(type(mod)))) + raise RuntimeError(f"'{torch.typename(type(mod))}' has not been initialized, did you forget to call 'super()'?") # This is to avoid importing torch.distributed.nn if not hasattr(mod, 'remote_parameters'): diff --git a/torch/jit/_script.py b/torch/jit/_script.py index 63f01c3fb29e9..a235b3d2b9304 100644 --- a/torch/jit/_script.py +++ b/torch/jit/_script.py @@ -198,8 +198,7 @@ def items(self): def __setitem__(self, k, v): if k not in self: raise RuntimeError( - "Can't add a new parameter after ScriptModule construction." - " Tried to add '{}".format(k) + f"Can't add a new parameter after ScriptModule construction. Tried to add '{k}'" ) self._c.setattr(k, v) @@ -798,9 +797,7 @@ def __setattr__(self, attr, value): # TODO: we don't have _concrete_type set after load(), and in general we lose constant information. # We should encode constants as class type attributes (or something) so it persists across save/load. raise AttributeError( - "Cannot mutate TorchScript constant value: '{}'. Value: '{}'".format( - attr, value - ) + f"Cannot mutate TorchScript constant value: '{attr}'.
Value: '{value}'" ) else: # We allow setting Python attributes on the ScriptModule, for @@ -1296,9 +1293,7 @@ def forward(self, a) -> MyModule: # an instance instead of a Module if issubclass(obj, torch.nn.Module): raise RuntimeError( - "Type '{}' cannot be compiled since it inherits" - " from nn.Module," - " pass an instance instead".format(obj) + f"Type '{obj}' cannot be compiled since it inherits from nn.Module, pass an instance instead" ) # Enums are automatically usable in TorchScript, explicitly scripting diff --git a/torch/jit/_shape_functions.py b/torch/jit/_shape_functions.py index 6d9eeb7ea081d..fb38b535ddb3b 100644 --- a/torch/jit/_shape_functions.py +++ b/torch/jit/_shape_functions.py @@ -37,8 +37,7 @@ def broadcast(a: List[int], b: List[int]): if sizeA != sizeB and sizeA != 1 and sizeB != 1: # TODO: only assertion error is bound in C++ compilation right now raise AssertionError( - "The size of tensor a {} must match the size of tensor b (" - "{}) at non-singleton dimension {}".format(sizeA, sizeB, i) + f"The size of tensor a {sizeA} must match the size of tensor b ({sizeB}) at non-singleton dimension {i}" ) expandedSizes.append(sizeB if sizeA == 1 else sizeA) @@ -81,8 +80,7 @@ def broadcast_inplace(a: List[int], b: List[int]): dimsB = len(b) if dimsB > dimsA: raise AssertionError( - "The dims of tensor b ({}) must be less than or equal to" - "the dims of tensor a ({}) ".format(dimsB, dimsA) + f"The dims of tensor b ({dimsB}) must be less than or equal tothe dims of tensor a ({dimsA}) " ) for dimA in range(dimsA): dimB = dimsB - dimsA + dimA diff --git a/torch/jit/_trace.py b/torch/jit/_trace.py index 27332f00c3b1b..f5e3635d8932d 100644 --- a/torch/jit/_trace.py +++ b/torch/jit/_trace.py @@ -1040,8 +1040,8 @@ def register_submods(mod, prefix): for key in example_inputs: if key not in argument_names: valid_arguments = "[" + ','.join(argument_names) + "]" - raise NameError("""'{}' is not in forward() method's arguments, - valid arguments name are {}""".format(key, valid_arguments)) + raise NameError(f"""'{key}' is not in forward() method's arguments, + valid arguments name are {valid_arguments}""") module._c._create_method_from_trace_with_dict( method_name, func, diff --git a/torch/jit/frontend.py b/torch/jit/frontend.py index 582b5753788d5..d79a13a0bbc26 100644 --- a/torch/jit/frontend.py +++ b/torch/jit/frontend.py @@ -124,7 +124,7 @@ def __init__(self, ctx, offending_node, reason=''): offending_node.col_offset, offending_node.col_offset + range_len) feature_name = pretty_node_names.get(node_type, node_type.__name__) - msg = "{} {}aren't supported".format(feature_name, reason + ' ' if reason else '') + msg = f"{feature_name} {reason + ' ' if reason else ''}aren't supported" super().__init__(source_range, msg) @@ -889,8 +889,7 @@ def build_ExtSlice(ctx, base, extslice): sub_exprs.append(Dots(base.range())) else: raise NotSupportedError(base.range(), - "slicing multiple dimensions with " - "{} not supported".format(sub_type)) + f"slicing multiple dimensions with {sub_type} not supported") return sub_exprs base = build_expr(ctx, expr.value) sub_type = type(expr.slice) diff --git a/torch/jit/quantized.py b/torch/jit/quantized.py index a5cf772b28e28..5a74401a9c6f0 100644 --- a/torch/jit/quantized.py +++ b/torch/jit/quantized.py @@ -136,20 +136,17 @@ def extra_repr(self): def check_forward_input(self, input): if input.size(1) != self.input_size: raise RuntimeError( - "input has inconsistent input_size: got {}, expected {}".format( - input.size(1), self.input_size)) + f"input has 
inconsistent input_size: got {input.size(1)}, expected {self.input_size}") @torch.jit.script_method def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None: if input.size(0) != hx.size(0): raise RuntimeError( - "Input batch size {} doesn't match hidden{} batch size {}".format( - input.size(0), hidden_label, hx.size(0))) + f"Input batch size {input.size(0)} doesn't match hidden{hidden_label} batch size {hx.size(0)}") if hx.size(1) != self.hidden_size: raise RuntimeError( - "hidden{} has inconsistent hidden_size: got {}, expected {}".format( - hidden_label, hx.size(1), self.hidden_size)) + f"hidden{hidden_label} has inconsistent hidden_size: got {hx.size(1)}, expected {self.hidden_size}") # TODO: for some reason weak_script_method causes a destruction of the # module to occur, which in turn frees the packed_ih object via its DataPtr @@ -324,12 +321,10 @@ def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: expected_input_dim = 2 if batch_sizes is not None else 3 if input.dim() != expected_input_dim: raise RuntimeError( - 'input must have {} dimensions, got {}'.format( - expected_input_dim, input.dim())) + f'input must have {expected_input_dim} dimensions, got {input.dim()}') if self.input_size != input.size(-1): raise RuntimeError( - 'input.size(-1) must be equal to input_size. Expected {}, got {}'.format( - self.input_size, input.size(-1))) + f'input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}') @torch.jit.script_method def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: diff --git a/torch/jit/supported_ops.py b/torch/jit/supported_ops.py index 6b7a7e5f96b5b..e3664674fbd81 100644 --- a/torch/jit/supported_ops.py +++ b/torch/jit/supported_ops.py @@ -18,7 +18,7 @@ def _emit_arg(indent, i, arg): if default is not None: v = f"{v}={str(default)}" if i > 0: - v = "\n{}{}".format(" " * indent, v) + v = f"\n{' ' * indent}{v}" return v def _emit_args(indent, arguments): @@ -30,7 +30,7 @@ def _emit_ret(ret): def _emit_rets(returns): if len(returns) == 1: return _emit_ret(returns[0]) - return "Tuple[{}]".format(", ".join(_emit_ret(r) for r in returns)) + return f"Tuple[{', '.join(_emit_ret(r) for r in returns)}]" def _emit_schema(mod, name, schema, arg_start=0, padding=4): if mod is None: @@ -270,20 +270,20 @@ def _get_global_builtins(): schematized_ops_str = textwrap.indent(schematized_ops_str, '\t') schemaless_ops_str = textwrap.indent(schemaless_ops_str, '\t') magic_methods_rows_str = textwrap.indent(magic_methods_rows_str, '\t') - section = """ + section = f""" The functions in the following table are supported but do not have a static schema .. csv-table:: :header: "Function", "Note" -{} +{schemaless_ops_str} The following functions will use the corresponding magic method on :any:`TorchScript classes` .. 
csv-table:: :header: "Function", "Magic Method" -{} +{magic_methods_rows_str} These built-in functions use the schema @@ -291,8 +291,8 @@ def _get_global_builtins(): :: -{} - """.format(schemaless_ops_str, magic_methods_rows_str, schematized_ops_str) +{schematized_ops_str} + """ return "Python Built-in Functions", section @@ -313,9 +313,9 @@ def emit_block(decls): header, items = fn() link_target = header.replace('`', '').replace('-', '').lower().replace(' ', '-') if isinstance(items, str): - section = "{}\n{}\n{}\n".format(header, '~' * len(header), items) + section = f"{header}\n{'~' * len(header)}\n{items}\n" else: - section = "{}\n{}\n{}".format(header, '~' * len(header), emit_block(items)) + section = f"{header}\n{'~' * len(header)}\n{emit_block(items)}" section = f'.. _{link_target}:' + '\n\n' + section body += section diff --git a/torch/multiprocessing/queue.py b/torch/multiprocessing/queue.py index 3128fc9e16e7d..673c0a05c6bde 100644 --- a/torch/multiprocessing/queue.py +++ b/torch/multiprocessing/queue.py @@ -23,8 +23,7 @@ def recv(self): def __getattr__(self, name): if 'conn' in self.__dict__: return getattr(self.conn, name) - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, 'conn')) + raise AttributeError(f"'{type(self).__name__}' object has no attribute 'conn'") class Queue(multiprocessing.queues.Queue): diff --git a/torch/nn/functional.py b/torch/nn/functional.py index 0c7ebb3b2c007..c0f0e62ee4744 100644 --- a/torch/nn/functional.py +++ b/torch/nn/functional.py @@ -912,9 +912,7 @@ def _unpool_output_size( max_size = default_size[d] + stride[d] if not (min_size < output_size[d] < max_size): raise ValueError( - 'invalid output_size "{}" (dim {} must be between {} and {})'.format( - output_size, d, min_size, max_size - ) + f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})' ) ret = output_size @@ -1266,7 +1264,7 @@ def dropout(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool if has_torch_function_unary(input): return handle_torch_function(dropout, (input,), input, p=p, training=training, inplace=inplace) if p < 0.0 or p > 1.0: - raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training) @@ -1278,7 +1276,7 @@ def alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace if has_torch_function_unary(input): return handle_torch_function(alpha_dropout, (input,), input, p=p, training=training, inplace=inplace) if p < 0.0 or p > 1.0: - raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") return _VF.alpha_dropout_(input, p, training) if inplace else _VF.alpha_dropout(input, p, training) @@ -1300,7 +1298,7 @@ def dropout1d(input: Tensor, p: float = 0.5, training: bool = True, inplace: boo if has_torch_function_unary(input): return handle_torch_function(dropout1d, (input,), input, p=p, training=training, inplace=inplace) if p < 0.0 or p > 1.0: - raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") inp_dim = input.dim() if inp_dim not in (2, 3): raise RuntimeError(f"dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. 
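The supported_ops section above interpolates pre-indented tables into a triple-quoted f-string; newlines and leading whitespace inside the literal are preserved exactly, which is why each table is run through textwrap.indent first. A self-contained sketch with made-up rows:

    import textwrap

    rows = '"torch.abs", ""\n"torch.add", ""'   # made-up table rows
    schemaless_ops_str = textwrap.indent(rows, '\t')
    section = f"""
    The functions in the following table are supported but do not have a static schema

    .. csv-table::
        :header: "Function", "Note"

    {schemaless_ops_str}
    """
    print(section)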
" @@ -1338,7 +1336,7 @@ def dropout2d(input: Tensor, p: float = 0.5, training: bool = True, inplace: boo if has_torch_function_unary(input): return handle_torch_function(dropout2d, (input,), input, p=p, training=training, inplace=inplace) if p < 0.0 or p > 1.0: - raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") inp_dim = input.dim() if inp_dim not in (3, 4): warn_msg = (f"dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated " @@ -1382,7 +1380,7 @@ def dropout3d(input: Tensor, p: float = 0.5, training: bool = True, inplace: boo if has_torch_function_unary(input): return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace) if p < 0.0 or p > 1.0: - raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") inp_dim = input.dim() if inp_dim not in (4, 5): warn_msg = (f"dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated " @@ -1428,7 +1426,7 @@ def feature_alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, feature_alpha_dropout, (input,), input, p=p, training=training, inplace=inplace ) if p < 0.0 or p > 1.0: - raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") return _VF.feature_alpha_dropout_(input, p, training) if inplace else _VF.feature_alpha_dropout(input, p, training) @@ -1791,8 +1789,7 @@ def softsign(input): def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int: warnings.warn( - "Implicit dimension choice for {} has been deprecated. " - "Change the call to include dim=X as an argument.".format(name), + f"Implicit dimension choice for {name} has been deprecated. 
Change the call to include dim=X as an argument.", stacklevel=stacklevel, ) if ndim == 0 or ndim == 1 or ndim == 3: @@ -2575,10 +2572,7 @@ def local_response_norm(input: Tensor, size: int, alpha: float = 1e-4, beta: flo dim = input.dim() if dim < 3: raise ValueError( - "Expected 3D or higher dimensionality \ - input (got {} dimensions)".format( - dim - ) + f"Expected 3D or higher dimensionality input (got {dim} dimensions)" ) if input.numel() == 0: @@ -3368,7 +3362,7 @@ def margin_ranking_loss( reduction_enum = _Reduction.get_enum(reduction) if (input1.dim() != input2.dim() or input1.dim() != target.dim()): raise RuntimeError( - "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + f"margin_ranking_loss : All input tensors should have same dimension but got sizes: " f"input1: {input1.size()}, input2: {input2.size()}, target: {target.size()} " ) return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum) @@ -4277,8 +4271,7 @@ def grid_sample( ) if mode != "bilinear" and mode != "nearest" and mode != "bicubic": raise ValueError( - "nn.functional.grid_sample(): expected mode to be " - "'bilinear', 'nearest' or 'bicubic', but got: '{}'".format(mode) + f"nn.functional.grid_sample(): expected mode to be 'bilinear', 'nearest' or 'bicubic', but got: '{mode}'" ) if padding_mode != "zeros" and padding_mode != "border" and padding_mode != "reflection": raise ValueError( @@ -4379,15 +4372,13 @@ def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] = if len(size) == 4: if theta.dim() != 3 or theta.shape[-2] != 2 or theta.shape[-1] != 3: raise ValueError( - "Expected a batch of 2D affine matrices of shape Nx2x3 " - "for size {}. Got {}.".format(size, theta.shape) + f"Expected a batch of 2D affine matrices of shape Nx2x3 for size {size}. Got {theta.shape}." ) spatial_size = size[-2:] # spatial dimension sizes elif len(size) == 5: if theta.dim() != 3 or theta.shape[-2] != 3 or theta.shape[-1] != 4: raise ValueError( - "Expected a batch of 3D affine matrices of shape Nx3x4 " - "for size {}. Got {}.".format(size, theta.shape) + f"Expected a batch of 3D affine matrices of shape Nx3x4 for size {size}. Got {theta.shape}." ) spatial_size = size[-3:] # spatial dimension sizes else: diff --git a/torch/nn/init.py b/torch/nn/init.py index dc7d121f7aca2..21768a65a9304 100644 --- a/torch/nn/init.py +++ b/torch/nn/init.py @@ -540,8 +540,7 @@ def _make_deprecate(meth): old_name = new_name[:-1] def deprecated_init(*args, **kwargs): - warnings.warn("nn.init.{} is now deprecated in favor of nn.init.{}." 
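In the margin_ranking_loss hunk above only the second literal interpolates anything, so the added f prefix on the first line is cosmetic (flake8 reports a placeholder-less f-string as F541); adjacent literals concatenate either way. A sketch with toy sizes:

    input1_size, input2_size, target_size = (3,), (3,), (3, 1)   # toy sizes
    msg = ("margin_ranking_loss : All input tensors should have same dimension but got sizes: "
           f"input1: {input1_size}, input2: {input2_size}, target: {target_size} ")
    print(msg)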
- .format(old_name, new_name), stacklevel=2) + warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2) return meth(*args, **kwargs) deprecated_init.__doc__ = r""" diff --git a/torch/nn/modules/activation.py b/torch/nn/modules/activation.py index 28021a61a520e..0f7c504b37d10 100644 --- a/torch/nn/modules/activation.py +++ b/torch/nn/modules/activation.py @@ -60,9 +60,7 @@ def forward(self, input: Tensor) -> Tensor: def extra_repr(self): inplace_str = ', inplace=True' if self.inplace else '' - return 'threshold={}, value={}{}'.format( - self.threshold, self.value, inplace_str - ) + return f'threshold={self.threshold}, value={self.value}{inplace_str}' class ReLU(Module): @@ -237,9 +235,7 @@ def forward(self, input: Tensor) -> Tensor: def extra_repr(self) -> str: inplace_str = ', inplace=True' if self.inplace else '' - return 'min_val={}, max_val={}{}'.format( - self.min_val, self.max_val, inplace_str - ) + return f'min_val={self.min_val}, max_val={self.max_val}{inplace_str}' class ReLU6(Hardtanh): diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py index 04d9e6a649fd5..9fb1135eb9d6a 100644 --- a/torch/nn/modules/conv.py +++ b/torch/nn/modules/conv.py @@ -93,15 +93,13 @@ def __init__(self, if isinstance(padding, str): if padding not in valid_padding_strings: raise ValueError( - "Invalid padding string {!r}, should be one of {}".format( - padding, valid_padding_strings)) + f"Invalid padding string {padding!r}, should be one of {valid_padding_strings}") if padding == 'same' and any(s != 1 for s in stride): raise ValueError("padding='same' is not supported for strided convolutions") valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'} if padding_mode not in valid_padding_modes: - raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format( - valid_padding_modes, padding_mode)) + raise ValueError(f"padding_mode must be one of {valid_padding_modes}, but got padding_mode='{padding_mode}'") self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size diff --git a/torch/nn/modules/dropout.py b/torch/nn/modules/dropout.py index afe11e0859c44..d78d359d745ef 100644 --- a/torch/nn/modules/dropout.py +++ b/torch/nn/modules/dropout.py @@ -13,8 +13,7 @@ class _DropoutNd(Module): def __init__(self, p: float = 0.5, inplace: bool = False) -> None: super().__init__() if p < 0 or p > 1: - raise ValueError("dropout probability has to be between 0 and 1, " - "but got {}".format(p)) + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") self.p = p self.inplace = inplace diff --git a/torch/nn/modules/flatten.py b/torch/nn/modules/flatten.py index 1ab2dd187b0e4..5938461e15cec 100644 --- a/torch/nn/modules/flatten.py +++ b/torch/nn/modules/flatten.py @@ -47,9 +47,7 @@ def forward(self, input: Tensor) -> Tensor: return input.flatten(self.start_dim, self.end_dim) def extra_repr(self) -> str: - return 'start_dim={}, end_dim={}'.format( - self.start_dim, self.end_dim - ) + return f'start_dim={self.start_dim}, end_dim={self.end_dim}' class Unflatten(Module): diff --git a/torch/nn/modules/instancenorm.py b/torch/nn/modules/instancenorm.py index 8e0a54ed0d747..fbfbd56115952 100644 --- a/torch/nn/modules/instancenorm.py +++ b/torch/nn/modules/instancenorm.py @@ -159,8 +159,7 @@ def _get_no_batch_dim(self): def _check_input_dim(self, input): if input.dim() not in (2, 3): - raise ValueError('expected 2D or 3D input (got {}D input)' - .format(input.dim())) + raise 
ValueError(f'expected 2D or 3D input (got {input.dim()}D input)') class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm): @@ -198,8 +197,7 @@ def _get_no_batch_dim(self): def _check_input_dim(self, input): if input.dim() not in (2, 3): - raise ValueError('expected 2D or 3D input (got {}D input)' - .format(input.dim())) + raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)') class InstanceNorm2d(_InstanceNorm): @@ -275,8 +273,7 @@ def _get_no_batch_dim(self): def _check_input_dim(self, input): if input.dim() not in (3, 4): - raise ValueError('expected 3D or 4D input (got {}D input)' - .format(input.dim())) + raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)') class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm): @@ -314,8 +311,7 @@ def _get_no_batch_dim(self): def _check_input_dim(self, input): if input.dim() not in (3, 4): - raise ValueError('expected 3D or 4D input (got {}D input)' - .format(input.dim())) + raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)') class InstanceNorm3d(_InstanceNorm): @@ -391,8 +387,7 @@ def _get_no_batch_dim(self): def _check_input_dim(self, input): if input.dim() not in (4, 5): - raise ValueError('expected 4D or 5D input (got {}D input)' - .format(input.dim())) + raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)') class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm): @@ -430,5 +425,4 @@ def _get_no_batch_dim(self): def _check_input_dim(self, input): if input.dim() not in (4, 5): - raise ValueError('expected 4D or 5D input (got {}D input)' - .format(input.dim())) + raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)') diff --git a/torch/nn/modules/linear.py b/torch/nn/modules/linear.py index 07d429bb13b0c..03b641fbad9f1 100644 --- a/torch/nn/modules/linear.py +++ b/torch/nn/modules/linear.py @@ -114,9 +114,7 @@ def forward(self, input: Tensor) -> Tensor: return F.linear(input, self.weight, self.bias) def extra_repr(self) -> str: - return 'in_features={}, out_features={}, bias={}'.format( - self.in_features, self.out_features, self.bias is not None - ) + return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}' # This class exists solely to avoid triggering an obscure error when scripting diff --git a/torch/nn/modules/module.py b/torch/nn/modules/module.py index 965bb69613857..2e6eaec90f4c2 100644 --- a/torch/nn/modules/module.py +++ b/torch/nn/modules/module.py @@ -529,8 +529,7 @@ def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool raise AttributeError( "cannot assign buffer before Module.__init__() call") elif not isinstance(name, str): - raise TypeError("buffer name should be a string. " - "Got {}".format(torch.typename(name))) + raise TypeError(f"buffer name should be a string. Got {torch.typename(name)}") elif '.' in name: raise KeyError("buffer name can't contain \".\"") elif name == '': @@ -570,8 +569,7 @@ def register_parameter(self, name: str, param: Optional[Parameter]) -> None: "cannot assign parameter before Module.__init__() call") elif not isinstance(name, str): - raise TypeError("parameter name should be a string. " - "Got {}".format(torch.typename(name))) + raise TypeError(f"parameter name should be a string. Got {torch.typename(name)}") elif '.' in name: raise KeyError("parameter name can't contain \".\"") elif name == '': @@ -609,11 +607,9 @@ def add_module(self, name: str, module: Optional['Module']) -> None: module (Module): child module to be added to the module. 
""" if not isinstance(module, Module) and module is not None: - raise TypeError("{} is not a Module subclass".format( - torch.typename(module))) + raise TypeError(f"{torch.typename(module)} is not a Module subclass") elif not isinstance(name, str): - raise TypeError("module name should be a string. Got {}".format( - torch.typename(name))) + raise TypeError(f"module name should be a string. Got {torch.typename(name)}") elif hasattr(self, name) and name not in self._modules: raise KeyError(f"attribute '{name}' already exists") elif '.' in name: @@ -1696,8 +1692,7 @@ def __getattr__(self, name: str) -> Any: modules = self.__dict__['_modules'] if name in modules: return modules[name] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, name)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None: def remove_from(*dicts_or_sets): diff --git a/torch/nn/modules/pooling.py b/torch/nn/modules/pooling.py index 17836f339f4ad..7f7c7e42e9a71 100644 --- a/torch/nn/modules/pooling.py +++ b/torch/nn/modules/pooling.py @@ -249,9 +249,7 @@ def forward(self, input: Tensor): class _MaxUnpoolNd(Module): def extra_repr(self) -> str: - return 'kernel_size={}, stride={}, padding={}'.format( - self.kernel_size, self.stride, self.padding - ) + return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}' class MaxUnpool1d(_MaxUnpoolNd): @@ -489,9 +487,7 @@ class _AvgPoolNd(Module): __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad'] def extra_repr(self) -> str: - return 'kernel_size={}, stride={}, padding={}'.format( - self.kernel_size, self.stride, self.padding - ) + return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}' class AvgPool1d(_AvgPoolNd): @@ -794,8 +790,7 @@ def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = No raise ValueError("only one of output_size and output_ratio may be specified") if self.output_ratio is not None: if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1): - raise ValueError("output_ratio must be between 0 and 1 (got {})" - .format(output_ratio)) + raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})") def forward(self, input: Tensor): return F.fractional_max_pool2d( @@ -865,8 +860,7 @@ def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = No raise ValueError("only one of output_size and output_ratio may be specified") if self.output_ratio is not None: if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1): - raise ValueError("output_ratio must be between 0 and 1 (got {})" - .format(output_ratio)) + raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})") def forward(self, input: Tensor): return F.fractional_max_pool3d( diff --git a/torch/nn/modules/rnn.py b/torch/nn/modules/rnn.py index 5e5254cd85999..211c4e65768f4 100644 --- a/torch/nn/modules/rnn.py +++ b/torch/nn/modules/rnn.py @@ -216,17 +216,14 @@ def reset_parameters(self) -> None: def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: if not torch.jit.is_scripting(): if input.dtype != self._flat_weights[0].dtype and not torch._C._is_any_autocast_enabled(): - raise ValueError('input must have the type {}, got type {}'.format( - self._flat_weights[0].dtype, input.dtype)) + raise ValueError(f'input must have the type 
{self._flat_weights[0].dtype}, got type {input.dtype}') expected_input_dim = 2 if batch_sizes is not None else 3 if input.dim() != expected_input_dim: raise RuntimeError( - 'input must have {} dimensions, got {}'.format( - expected_input_dim, input.dim())) + f'input must have {expected_input_dim} dimensions, got {input.dim()}') if self.input_size != input.size(-1): raise RuntimeError( - 'input.size(-1) must be equal to input_size. Expected {}, got {}'.format( - self.input_size, input.size(-1))) + f'input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}') def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: if batch_sizes is not None: diff --git a/torch/nn/parallel/comm.py b/torch/nn/parallel/comm.py index 35f8cf24c4e7c..1d3437084e5e8 100644 --- a/torch/nn/parallel/comm.py +++ b/torch/nn/parallel/comm.py @@ -29,8 +29,7 @@ def broadcast(tensor, devices=None, *, out=None): tensor = _handle_complex(tensor) if not ((devices is None) ^ (out is None)): raise RuntimeError( - "Exactly one of 'devices' and 'out' must be specified, but got " - "devices={} and out={}".format(devices, out)) + f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}") if devices is not None: devices = [_get_device_index(d) for d in devices] return torch._C._broadcast(tensor, devices) @@ -83,8 +82,7 @@ def reduce_add(inputs, destination=None): if inp.size() != input_size: got = 'x'.join(str(x) for x in inp.size()) expected = 'x'.join(str(x) for x in input_size) - raise ValueError("input {} has invalid size: got {}, but expected " - "{}".format(i, got, expected)) + raise ValueError(f"input {i} has invalid size: got {got}, but expected {expected}") if root_index is None: raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors") @@ -190,12 +188,10 @@ def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out= else: if devices is not None: raise RuntimeError( - "'devices' must not be specified when 'out' is specified, but " - "got devices={}".format(devices)) + f"'devices' must not be specified when 'out' is specified, but got devices={devices}") if chunk_sizes is not None: raise RuntimeError( - "'chunk_sizes' must not be specified when 'out' is specified, " - "but got chunk_sizes={}".format(chunk_sizes)) + f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}") return tuple(torch._C._scatter_out(tensor, out, dim, streams)) @@ -236,6 +232,5 @@ def gather(tensors, dim=0, destination=None, *, out=None): else: if destination is not None: raise RuntimeError( - "'destination' must not be specified when 'out' is specified, but " - "got destination={}".format(destination)) + f"'destination' must not be specified when 'out' is specified, but got destination={destination}") return torch._C._gather_out(tensors, out, dim) diff --git a/torch/nn/utils/_named_member_accessor.py b/torch/nn/utils/_named_member_accessor.py index 426c6df7f3722..91c3ab54f9217 100644 --- a/torch/nn/utils/_named_member_accessor.py +++ b/torch/nn/utils/_named_member_accessor.py @@ -318,9 +318,7 @@ def swap_tensors_dict( # Swap back if any key is missing when allow_missing is False for name, orig_tensor in orig_named_tensors.items(): self.swap_tensor(name, orig_tensor, allow_missing=True) - raise RuntimeError( - "Missing key(s): {}.".format(", ".join(map(repr, missing_keys))) - ) + raise RuntimeError(f"Missing key(s): {', 
'.join(map(repr, missing_keys))}.") return orig_named_tensors, missing_keys def check_keys(self, keys: Iterable[str]) -> Tuple[List[str], List[str]]: diff --git a/torch/nn/utils/convert_parameters.py b/torch/nn/utils/convert_parameters.py index 37576aa539294..4040fad0cdec5 100644 --- a/torch/nn/utils/convert_parameters.py +++ b/torch/nn/utils/convert_parameters.py @@ -34,8 +34,7 @@ def vector_to_parameters(vec: torch.Tensor, parameters: Iterable[torch.Tensor]) """ # Ensure vec of type Tensor if not isinstance(vec, torch.Tensor): - raise TypeError('expected torch.Tensor, but got: {}' - .format(torch.typename(vec))) + raise TypeError(f'expected torch.Tensor, but got: {torch.typename(vec)}') # Flag for the device where the parameter is located param_device = None diff --git a/torch/nn/utils/prune.py b/torch/nn/utils/prune.py index 9c81618d7646a..086f08f0c18b1 100644 --- a/torch/nn/utils/prune.py +++ b/torch/nn/utils/prune.py @@ -63,9 +63,7 @@ def apply_mask(self, module): """ # to carry out the multiplication, the mask needs to have been computed, # so the pruning method must know what tensor it's operating on - assert self._tensor_name is not None, "Module {} has to be pruned".format( - module - ) # this gets set in apply() + assert self._tensor_name is not None, f"Module {module} has to be pruned" # this gets set in apply() mask = getattr(module, self._tensor_name + "_mask") orig = getattr(module, self._tensor_name + "_orig") pruned_tensor = mask.to(dtype=orig.dtype) * orig @@ -155,10 +153,7 @@ def _get_composite_method(cls, module, name, *args, **kwargs): if importance_scores is not None: assert ( importance_scores.shape == orig.shape - ), "importance_scores should have the same shape as parameter \ - {} of {}".format( - name, module - ) + ), f"importance_scores should have the same shape as parameter {name} of {module}" else: importance_scores = orig @@ -244,10 +239,7 @@ def remove(self, module): # before removing pruning from a tensor, it has to have been applied assert ( self._tensor_name is not None - ), "Module {} has to be pruned\ - before pruning can be removed".format( - module - ) # this gets set in apply() + ), f"Module {module} has to be pruned before pruning can be removed" # this gets set in apply() # to update module[name] to latest trained weights weight = self.apply_mask(module) # masked weights @@ -384,9 +376,7 @@ def _combine_masks(method, t, mask): # if dim is still negative after subtracting it from n_dims if dim < 0: raise IndexError( - "Index is out of bounds for tensor with dimensions {}".format( - n_dims - ) + f"Index is out of bounds for tensor with dimensions {n_dims}" ) # find channels along dim = dim that aren't already tots 0ed out keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0 @@ -1192,8 +1182,7 @@ def remove(module, name): return module raise ValueError( - "Parameter '{}' of module {} has to be pruned " - "before pruning can be removed".format(name, module) + f"Parameter '{name}' of module {module} has to be pruned before pruning can be removed" ) @@ -1244,7 +1233,7 @@ def _validate_pruning_amount_init(amount): """ if not isinstance(amount, numbers.Real): raise TypeError( - "Invalid type for amount: {}. Must be int or float." "".format(amount) + f"Invalid type for amount: {amount}. Must be int or float." 
) if (isinstance(amount, numbers.Integral) and amount < 0) or ( @@ -1252,9 +1241,7 @@ def _validate_pruning_amount_init(amount): and (float(amount) > 1.0 or float(amount) < 0.0) ): raise ValueError( - "amount={} should either be a float in the " - "range [0, 1] or a non-negative integer" - "".format(amount) + f"amount={amount} should either be a float in the range [0, 1] or a non-negative integer" ) @@ -1276,8 +1263,7 @@ def _validate_pruning_amount(amount, tensor_size): if isinstance(amount, numbers.Integral) and amount > tensor_size: raise ValueError( - "amount={} should be smaller than the number of " - "parameters to prune={}".format(amount, tensor_size) + f"amount={amount} should be smaller than the number of parameters to prune={tensor_size}" ) diff --git a/torch/nn/utils/spectral_norm.py b/torch/nn/utils/spectral_norm.py index a6d45342a37b0..b9b9dbf9b2880 100644 --- a/torch/nn/utils/spectral_norm.py +++ b/torch/nn/utils/spectral_norm.py @@ -117,8 +117,7 @@ def _solve_v_and_rescale(self, weight_mat, u, target_sigma): def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm': for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, SpectralNorm) and hook.name == name: - raise RuntimeError("Cannot register two spectral_norm hooks on " - "the same parameter {}".format(name)) + raise RuntimeError(f"Cannot register two spectral_norm hooks on the same parameter {name}") fn = SpectralNorm(name, n_power_iterations, dim, eps) weight = module._parameters[name] @@ -300,8 +299,7 @@ def remove_spectral_norm(module: T_module, name: str = 'weight') -> T_module: del module._forward_pre_hooks[k] break else: - raise ValueError("spectral_norm of '{}' not found in {}".format( - name, module)) + raise ValueError(f"spectral_norm of '{name}' not found in {module}") for k, hook in module._state_dict_hooks.items(): if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name: diff --git a/torch/nn/utils/stateless.py b/torch/nn/utils/stateless.py index 6d934905e77ba..007d3bb82c5a2 100644 --- a/torch/nn/utils/stateless.py +++ b/torch/nn/utils/stateless.py @@ -109,12 +109,10 @@ def _reparametrize_module( error_msgs = [] if len(unexpected_keys) > 0: error_msgs.append( - "Unexpected key(s): {}.".format(", ".join(map(repr, unexpected_keys))) + f"Unexpected key(s): {', '.join(map(repr, unexpected_keys))}." 
) if len(missing_keys) > 0: - error_msgs.append( - "Missing key(s): {}.".format(", ".join(map(repr, missing_keys))) - ) + error_msgs.append(f"Missing key(s): {', '.join(map(repr, missing_keys))}.") if len(error_msgs) > 0: raise RuntimeError( "Error(s) in reparametrizing for {}:\n\t{}".format( diff --git a/torch/nn/utils/weight_norm.py b/torch/nn/utils/weight_norm.py index 07f0707024866..719cf36a13383 100644 --- a/torch/nn/utils/weight_norm.py +++ b/torch/nn/utils/weight_norm.py @@ -31,8 +31,7 @@ def apply(module, name: str, dim: int) -> 'WeightNorm': for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, WeightNorm) and hook.name == name: - raise RuntimeError("Cannot register two weight_norm hooks on " - "the same parameter {}".format(name)) + raise RuntimeError(f"Cannot register two weight_norm hooks on the same parameter {name}") if dim is None: dim = -1 @@ -151,5 +150,4 @@ def remove_weight_norm(module: T_module, name: str = 'weight') -> T_module: del module._forward_pre_hooks[k] return module - raise ValueError("weight_norm of '{}' not found in {}" - .format(name, module)) + raise ValueError(f"weight_norm of '{name}' not found in {module}") diff --git a/torch/optim/adagrad.py b/torch/optim/adagrad.py index 1a3e5120004f9..c1e981809c4ec 100644 --- a/torch/optim/adagrad.py +++ b/torch/optim/adagrad.py @@ -30,9 +30,7 @@ def __init__( raise ValueError(f"Invalid weight_decay value: {weight_decay}") if not 0.0 <= initial_accumulator_value: raise ValueError( - "Invalid initial_accumulator_value value: {}".format( - initial_accumulator_value - ) + f"Invalid initial_accumulator_value value: {initial_accumulator_value}" ) if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps}") diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py index d0f85a5daea0c..d78fdbf18580b 100644 --- a/torch/optim/lr_scheduler.py +++ b/torch/optim/lr_scheduler.py @@ -29,8 +29,7 @@ def __init__(self, optimizer, last_epoch=-1, verbose=False): # Attach optimizer if not isinstance(optimizer, Optimizer): - raise TypeError('{} is not an Optimizer'.format( - type(optimizer).__name__)) + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') self.optimizer = optimizer # Initialize epoch and base learning rates @@ -115,13 +114,11 @@ def print_lr(self, is_verbose, group, lr, epoch=None): """ if is_verbose: if epoch is None: - print('Adjusting learning rate' - ' of group {} to {:.4e}.'.format(group, lr)) + print(f'Adjusting learning rate of group {group} to {lr:.4e}.') else: epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch - print('Epoch {}: adjusting learning rate' - ' of group {} to {:.4e}.'.format(epoch_str, group, lr)) + print(f'Epoch {epoch_str}: adjusting learning rate of group {group} to {lr:.4e}.') def step(self, epoch=None): @@ -215,8 +212,7 @@ def __init__(self, optimizer, lr_lambda, last_epoch=-1, verbose=False): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): - raise ValueError("Expected {} lr_lambdas, but got {}".format( - len(optimizer.param_groups), len(lr_lambda))) + raise ValueError(f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}") self.lr_lambdas = list(lr_lambda) super().__init__(optimizer, last_epoch, verbose) @@ -299,8 +295,7 @@ def __init__(self, optimizer, lr_lambda, last_epoch=-1, verbose=False): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): - raise 
ValueError("Expected {} lr_lambdas, but got {}".format( - len(optimizer.param_groups), len(lr_lambda))) + raise ValueError(f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}") self.lr_lambdas = list(lr_lambda) super().__init__(optimizer, last_epoch, verbose) @@ -970,14 +965,12 @@ def __init__(self, optimizer, mode='min', factor=0.1, patience=10, # Attach optimizer if not isinstance(optimizer, Optimizer): - raise TypeError('{} is not an Optimizer'.format( - type(optimizer).__name__)) + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') self.optimizer = optimizer if isinstance(min_lr, (list, tuple)): if len(min_lr) != len(optimizer.param_groups): - raise ValueError("expected {} min_lrs, got {}".format( - len(optimizer.param_groups), len(min_lr))) + raise ValueError(f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}") self.min_lrs = list(min_lr) else: self.min_lrs = [min_lr] * len(optimizer.param_groups) @@ -1039,8 +1032,7 @@ def _reduce_lr(self, epoch): if self.verbose: epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch - print('Epoch {}: reducing learning rate' - ' of group {} to {:.4e}.'.format(epoch_str, i, new_lr)) + print(f'Epoch {epoch_str}: reducing learning rate of group {i} to {new_lr:.4e}.') @property def in_cooldown(self): @@ -1197,8 +1189,7 @@ def __init__(self, # Attach optimizer if not isinstance(optimizer, Optimizer): - raise TypeError('{} is not an Optimizer'.format( - type(optimizer).__name__)) + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') self.optimizer = optimizer base_lrs = self._format_param('base_lr', optimizer, base_lr) @@ -1257,8 +1248,7 @@ def _format_param(self, name, optimizer, param): """Return correctly formatted lr/momentum for each param group.""" if isinstance(param, (list, tuple)): if len(param) != len(optimizer.param_groups): - raise ValueError("expected {} values for {}, got {}".format( - len(optimizer.param_groups), name, len(param))) + raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}") return param else: return [param] * len(optimizer.param_groups) @@ -1581,8 +1571,7 @@ def __init__(self, # Validate optimizer if not isinstance(optimizer, Optimizer): - raise TypeError('{} is not an Optimizer'.format( - type(optimizer).__name__)) + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') self.optimizer = optimizer # Validate total_steps @@ -1684,8 +1673,7 @@ def _format_param(self, name, optimizer, param): """Return correctly formatted lr/momentum for each param group.""" if isinstance(param, (list, tuple)): if len(param) != len(optimizer.param_groups): - raise ValueError("expected {} values for {}, got {}".format( - len(optimizer.param_groups), name, len(param))) + raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}") return param else: return [param] * len(optimizer.param_groups) diff --git a/torch/package/package_importer.py b/torch/package/package_importer.py index 2d313c8f14eb4..13b96f13d8775 100644 --- a/torch/package/package_importer.py +++ b/torch/package/package_importer.py @@ -484,7 +484,7 @@ def _find_and_load(self, name): return self._do_find_and_load(name) if module is None: - message = "import of {} halted; " "None in sys.modules".format(name) + message = f"import of {name} halted; None in sys.modules" raise ModuleNotFoundError(message, name=name) # To handle https://github.com/pytorch/pytorch/issues/57490, where std's diff --git 
index 12e5959cc9b48..46f9f22dd09d4 100644
--- a/torch/profiler/_memory_profiler.py
+++ b/torch/profiler/_memory_profiler.py
@@ -1129,12 +1129,12 @@ def export_memory_timeline_html(self, path, device, figsize=(20, 12), title=None
         with open(tmpfile.name, 'rb') as tmp:
             encoded = b64encode(tmp.read()).decode('utf-8')

-            html = """<html>
+            html = f"""<html>
 <head><title>GPU Memory Timeline HTML</title></head>
 <body>
-  <img src='data:image/png;base64,{}'>
+  <img src='data:image/png;base64,{encoded}'>
 </body>
-</html>""".format(encoded)
+</html>"""

         with open(path, 'w') as f:
             f.write(html)
diff --git a/torch/serialization.py b/torch/serialization.py
index 5c2e4ecd68551..7b600f76f218f 100644
--- a/torch/serialization.py
+++ b/torch/serialization.py
@@ -1221,7 +1221,7 @@ def persistent_load(saved_id):
                 res = typed_storage
             return res
         else:
-            raise RuntimeError("Unknown saved id type: %s" % saved_id[0])
+            raise RuntimeError(f"Unknown saved id type: {saved_id[0]}")

     _check_seekable(f)
     f_should_read_directly = _should_read_directly(f)
@@ -1250,7 +1250,7 @@ def persistent_load(saved_id):
         raise RuntimeError("Invalid magic number; corrupt file?")
     protocol_version = pickle_module.load(f, **pickle_load_args)
     if protocol_version != PROTOCOL_VERSION:
-        raise RuntimeError("Invalid protocol version: %s" % protocol_version)
+        raise RuntimeError(f"Invalid protocol version: {protocol_version}")

     _sys_info = pickle_module.load(f, **pickle_load_args)
     unpickler = UnpicklerWrapper(f, **pickle_load_args)
diff --git a/torch/testing/_internal/codegen/random_topo_test.py b/torch/testing/_internal/codegen/random_topo_test.py
index b94f8f60a301d..2c526693a3e44 100644
--- a/torch/testing/_internal/codegen/random_topo_test.py
+++ b/torch/testing/_internal/codegen/random_topo_test.py
@@ -390,8 +390,7 @@ def parse_args():
     if len(failing_repros) == 0:
         print("test passed")
     else:
-        print("{} out of {} tests failed;".format(
-            len(failing_repros), args.iterations))
+        print(f"{len(failing_repros)} out of {args.iterations} tests failed;")
         print("To repro failing tests, run\n")
         for repro in failing_repros:
             print(repro)
diff --git a/torch/testing/_internal/common_distributed.py b/torch/testing/_internal/common_distributed.py
index d1cf02749b79c..aad7679c54461 100644
--- a/torch/testing/_internal/common_distributed.py
+++ b/torch/testing/_internal/common_distributed.py
@@ -808,9 +808,7 @@ def _check_return_codes(self, elapsed_time) -> None:
         for i, p in enumerate(self.processes):
             if p.exitcode is None:
                 raise RuntimeError(
-                    "Process {} terminated or timed out after {} seconds".format(
-                        i, elapsed_time
-                    )
+                    f"Process {i} terminated or timed out after {elapsed_time} seconds"
                 )
             self.assertEqual(
                 p.exitcode,
@@ -835,9 +833,7 @@ def _check_return_codes(self, elapsed_time) -> None:
         self.assertEqual(
             first_process.exitcode,
             0,
-            msg="Expected zero exit code but got {} for pid: {}".format(
-                first_process.exitcode, first_process.pid
-            ),
+            msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}",
         )

     @property
@@ -1091,9 +1087,7 @@ def _check_return_codes(cls, failed_ranks, timeout, fn):
                 if skip_code < 0:
                     skip_code = TEST_SKIPS["generic"].exit_code
             elif isinstance(exc, TimeoutError):
-                msg = "Thread {} terminated or timed out after {} seconds\n".format(
-                    rank, timeout
-                )
+                msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n"
                 logger.error(msg)
                 raise RuntimeError(msg)
             elif isinstance(exc, Exception):
diff --git a/torch/testing/_internal/common_nn.py b/torch/testing/_internal/common_nn.py
index 85171e71b34d7..f1304bba4f232 100644
--- a/torch/testing/_internal/common_nn.py
+++ b/torch/testing/_internal/common_nn.py
@@ -2542,13 +2542,13 @@ def unsqueeze_inp(inp):
         dict(
             module_name=f'Conv{d}d',
             constructor_args=(2, 3, 3, 2, padding, 1, 1, True, padding_mode),
-            cpp_constructor_args='''torch::nn::Conv{}dOptions(2, 3, 3)
+            cpp_constructor_args=f'''torch::nn::Conv{d}dOptions(2, 3, 3)
                                     .stride(2)
-                                    .padding({})
+                                    .padding({cpp_padding})
                                     .dilation(1)
                                     .groups(1)
                                     .bias(true)
-                                    .padding_mode({})'''.format(d, cpp_padding, cpp_padding_mode),
+                                    .padding_mode({cpp_padding_mode})''',
             input_size=input_size,
             output_size=output_size,
             cudnn=True,
diff --git a/torch/testing/_internal/common_quantization.py b/torch/testing/_internal/common_quantization.py
index bad76f8be66e9..c4bbfa095db99 100644
--- a/torch/testing/_internal/common_quantization.py
+++ b/torch/testing/_internal/common_quantization.py
@@ -772,8 +772,7 @@ def _get_underlying_op_type(
         self.assertTrue(
             len(matched_subgraph_pairs) == len(expected_types),
-            'Expected length of results to match, but got %d and %d' %
-            (len(matched_subgraph_pairs), len(expected_types))
+            f'Expected length of results to match, but got {len(matched_subgraph_pairs)} and {len(expected_types)}'
         )

         for k, v in expected_types.items():
             expected_types_a, expected_types_b = v
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index 359ae1cc2d6cb..8c2c8f86b3941 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -462,7 +462,7 @@ def _formatted_str_repr(self, name, value):
             return value.formatted_name
         else:
             # Include name and value separated by underscore.
-            return '{}_{}'.format(name, str(value).replace('.', '_'))
+            return f"{name}_{str(value).replace('.', '_')}"

     def _default_subtest_name(self, values):
         return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index 5fdc796310d44..dfde9b756e7f6 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -654,10 +654,7 @@ def test_dump_DDP_relevant_env_vars(self):
         lines = out.getvalue().splitlines()

         def format_line(var):
-            return "env:{}={}".format(
-                var,
-                os.environ[var] if var in os.environ else "N/A",
-            )
+            return f"env:{var}={os.environ[var] if var in os.environ else 'N/A'}"

         # Check relevant env vars
         vars = [
diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py
index 47b13a837a035..f2ef164d96ff8 100644
--- a/torch/testing/_internal/distributed/rpc/rpc_test.py
+++ b/torch/testing/_internal/distributed/rpc/rpc_test.py
@@ -914,9 +914,7 @@ def _stress_test_rpc(self, f, repeat=1000, args=()):
             self.assertEqual(val, 0)
         tok = time.time()
         print(
-            "Rank {} finished testing {} times in {} seconds.".format(
-                self.rank, repeat, tok - tik
-            )
+            f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds."
         )

     def _builtin_remote_ret(self, x, y, expected):
diff --git a/torch/testing/_internal/jit_metaprogramming_utils.py b/torch/testing/_internal/jit_metaprogramming_utils.py
index 88137fd1029a1..478dc5006ff0f 100644
--- a/torch/testing/_internal/jit_metaprogramming_utils.py
+++ b/torch/testing/_internal/jit_metaprogramming_utils.py
@@ -639,7 +639,7 @@ def get_nn_mod_test_name(**kwargs):
     else:
         test_name = get_nn_module_name_from_kwargs(**kwargs)
         if 'desc' in kwargs:
-            test_name = "{}_{}".format(test_name, kwargs['desc'])
+            test_name = f"{test_name}_{kwargs['desc']}"
     return f'test_nn_{test_name}'

 def get_nn_module_class_from_kwargs(**kwargs):
@@ -659,7 +659,7 @@ def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
         test_name = name
         if 'desc' in kwargs:
-            test_name = "{}_{}".format(test_name, kwargs['desc'])
+            test_name = f"{test_name}_{kwargs['desc']}"
     test_name = get_nn_mod_test_name(**kwargs)

     if test_name in EXCLUDE_SCRIPT_MODULES:
diff --git a/torch/testing/_internal/jit_utils.py b/torch/testing/_internal/jit_utils.py
index 2f6675234d3e7..bdeace15a71d3 100644
--- a/torch/testing/_internal/jit_utils.py
+++ b/torch/testing/_internal/jit_utils.py
@@ -316,8 +316,7 @@ def perform_assert(graph, kind, actual, expected, consider_subgraphs):
                 return
             subgraph = 'including' if consider_subgraphs else 'excluding'
             raise AssertionError(
-                '{}\nError: graph contains {} {} nodes ({} subgraphs) but expected {}'.format(
-                    graph, actual, kind, subgraph, expected))
+                f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}')

         if consider_subgraphs:
             strgraph = str(graph)
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index 6694a7483c641..cb705edc346c4 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -1341,7 +1341,7 @@ def formatted_name(self):
             if self.variant_test_name
             else ""
         )
-        return "{}{}".format(self.name.replace(".", "_"), variant)
+        return f"{self.name.replace('.', '_')}{variant}"


 def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs):
diff --git a/torch/testing/_internal/opinfo/utils.py b/torch/testing/_internal/opinfo/utils.py
index 017f26f7478c6..bc46fe141c165 100644
--- a/torch/testing/_internal/opinfo/utils.py
+++ b/torch/testing/_internal/opinfo/utils.py
@@ -139,16 +139,12 @@ def is_dynamic_dtype_set(op):


 def str_format_dynamic_dtype(op):
-    fmt_str = """
-        OpInfo({name},
-               dtypes={dtypes},
-               dtypesIfCUDA={dtypesIfCUDA},
+    fmt_str = f"""
+        OpInfo({op.name},
+               dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str},
+               dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str},
        )
-    """.format(
-        name=op.name,
-        dtypes=dtypes_dispatch_hint(op.dtypes).dispatch_fn_str,
-        dtypesIfCUDA=dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str,
-    )
+    """

     return fmt_str
diff --git a/torch/utils/bundled_inputs.py b/torch/utils/bundled_inputs.py
index ad34e15e6bfa1..df2d771f6f51d 100644
--- a/torch/utils/bundled_inputs.py
+++ b/torch/utils/bundled_inputs.py
@@ -358,12 +358,12 @@ def get_num_bundled_inputs(self):
         """))

     # Define some high level helper methods that act on all bundled inputs
-    model.define(textwrap.dedent("""
+    model.define(textwrap.dedent(f"""
         def get_bundled_inputs_functions_and_info(self):
             all_inputs : Dict[str, Dict[str,List[str]]] = {{}}
-            {template}
+            {get_bundled_inputs_functions_and_info_template}
             return all_inputs
-        """.format(template=get_bundled_inputs_functions_and_info_template)))
+        """))

 def _inflate_expr(
     arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False
diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
index 323a391a684ed..ad65993db2793 100644
--- a/torch/utils/cpp_extension.py
+++ b/torch/utils/cpp_extension.py
@@ -1834,7 +1834,7 @@ def _get_rocm_arch_flags(cflags: Optional[List[str]] = None) -> List[str]:
         archs = []
     else:
         archs = _archs.replace(' ', ';').split(';')
-    flags = ['--offload-arch=%s' % arch for arch in archs]
+    flags = [f'--offload-arch={arch}' for arch in archs]
     flags += ['-fno-gpu-rdc']
     return flags
diff --git a/torch/utils/data/_utils/worker.py b/torch/utils/data/_utils/worker.py
index 0d43f63a6a2f2..3403bf24cb4c2 100644
--- a/torch/utils/data/_utils/worker.py
+++ b/torch/utils/data/_utils/worker.py
@@ -83,7 +83,7 @@ def __repr__(self):
         items = []
         for k in self.__keys:
             items.append(f'{k}={getattr(self, k)}')
-        return '{}({})'.format(self.__class__.__name__, ', '.join(items))
+        return f"{self.__class__.__name__}({', '.join(items)})"


 def get_worker_info() -> Optional[WorkerInfo]:
diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py
index 1c33592f02f14..cae1d30d0688f 100644
--- a/torch/utils/data/dataloader.py
+++ b/torch/utils/data/dataloader.py
@@ -305,14 +305,12 @@ def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
             # We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
             elif shuffle not in {False, None}:
                 raise ValueError(
-                    "DataLoader with IterableDataset: expected unspecified "
-                    "shuffle option, but got shuffle={}".format(shuffle))
+                    f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}")

             if sampler is not None:
                 # See NOTE [ Custom Samplers and IterableDataset ]
                 raise ValueError(
-                    "DataLoader with IterableDataset: expected unspecified "
-                    "sampler option, but got sampler={}".format(sampler))
+                    f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}")
             elif batch_sampler is not None:
                 # See NOTE [ Custom Samplers and IterableDataset ]
                 raise ValueError(
@@ -418,8 +416,7 @@ def multiprocessing_context(self, multiprocessing_context):

     def __setattr__(self, attr, val):
         if self.__initialized and attr in (
                 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'):
-            raise ValueError('{} attribute should not be set after {} is '
-                             'initialized'.format(attr, self.__class__.__name__))
+            raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized')

         super().__setattr__(attr, val)
diff --git a/torch/utils/data/datapipes/_decorator.py b/torch/utils/data/datapipes/_decorator.py
index 96b7e00e076f0..d2afd996f54ba 100644
--- a/torch/utils/data/datapipes/_decorator.py
+++ b/torch/utils/data/datapipes/_decorator.py
@@ -95,8 +95,7 @@ def __call__(self, *args, **kwargs):

             # Decorate with a functional argument
             if not (isinstance(args[0], Type) and  # type: ignore[arg-type]
                     issubclass(args[0], IterDataPipe)):
-                raise TypeError("Only `IterDataPipe` can be decorated, but {} is found"
-                                .format(args[0].__name__))
+                raise TypeError(f"Only `IterDataPipe` can be decorated, but {args[0].__name__} is found")
             self.cls = args[0]
             return self.deterministic_wrapper_fn
@@ -129,8 +128,7 @@ def wrapper(*args, **kwargs):
             if argument_name in hints and isinstance(hints[argument_name], _DataPipeMeta):
                 hint = hints[argument_name]
                 if not isinstance(value, IterDataPipe):
-                    raise TypeError("Expected argument '{}' as a IterDataPipe, but found {}"
found {}" - .format(argument_name, type(value))) + raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}") if not value.type.issubtype(hint.type): raise TypeError("Expected type of argument '{}' as a subtype of " "hint {}, but found {}" @@ -167,8 +165,7 @@ def runtime_validation(f): # TODO: # Can be extended to validate '__getitem__' and nonblocking if f.__name__ != '__iter__': - raise TypeError("Can not decorate function {} with 'runtime_validation'" - .format(f.__name__)) + raise TypeError(f"Can not decorate function {f.__name__} with 'runtime_validation'") @wraps(f) def wrapper(self): @@ -179,8 +176,7 @@ def wrapper(self): it = f(self) for d in it: if not self.type.issubtype_of_instance(d): - raise RuntimeError("Expected an instance as subtype of {}, but found {}({})" - .format(self.type, d, type(d))) + raise RuntimeError(f"Expected an instance as subtype of {self.type}, but found {d}({type(d)})") yield d return wrapper diff --git a/torch/utils/data/datapipes/_typing.py b/torch/utils/data/datapipes/_typing.py index 68049ba30d901..a6523a18787c4 100644 --- a/torch/utils/data/datapipes/_typing.py +++ b/torch/utils/data/datapipes/_typing.py @@ -308,8 +308,7 @@ def _getitem_(self, params): t = _DataPipeType(params[0]) if not t.issubtype(self.type): - raise TypeError('Can not subclass a DataPipe[{}] from DataPipe[{}]' - .format(t, self.type)) + raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]') # Types are equal, fast path for inheritance if self.type == t: @@ -388,8 +387,7 @@ def _dp_init_subclass(sub_cls, *args, **kwargs): param = _eval_type(sub_cls.type.param, base_globals, locals()) sub_cls.type.param = param except TypeError as e: - raise TypeError("{} is not supported by Python typing" - .format(sub_cls.type.param.__forward_arg__)) from e + raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e if '__iter__' in sub_cls.__dict__: iter_fn = sub_cls.__dict__['__iter__'] @@ -421,8 +419,7 @@ def reinforce_type(self, expected_type): _type_check(expected_type, msg="'expected_type' must be a type") if not issubtype(expected_type, self.type.param): - raise TypeError("Expected 'expected_type' as subtype of {}, but found {}" - .format(self.type, _type_repr(expected_type))) + raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}") self.type = _DataPipeType(expected_type) return self diff --git a/torch/utils/data/datapipes/dataframe/dataframes.py b/torch/utils/data/datapipes/dataframe/dataframes.py index 72d93cde66c3c..b6d406b07c856 100644 --- a/torch/utils/data/datapipes/dataframe/dataframes.py +++ b/torch/utils/data/datapipes/dataframe/dataframes.py @@ -177,7 +177,7 @@ def __init__(self, ctx=None, **kwargs): class CaptureA(CaptureF): def __str__(self): - return '{name}'.format(name=self.kwargs['name']) + return f"{self.kwargs['name']}" def execute(self): value = self.kwargs['real_attribute'] @@ -247,7 +247,7 @@ def __init__(self, value, ctx): raise Exception('Attempting to create capture variable with capture off') self.ctx = ctx self.value = value - self.name = 'var_%s' % CaptureVariable.names_idx + self.name = f'var_{CaptureVariable.names_idx}' CaptureVariable.names_idx += 1 self.ctx['variables'].append(self) @@ -352,7 +352,7 @@ def get_val(capture): if isinstance(capture, Capture): return capture.execute() elif isinstance(capture, str): - return '"%s"' % capture + return f'"{capture}"' else: return capture @@ -361,7 +361,7 @@ 
     def __init__(self, schema_df=None):
         new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df}
         super().__init__(None, new_ctx)
-        self.name = 'input_%s' % self.name
+        self.name = f'input_{self.name}'


 class CaptureDataFrame(CaptureInitial):
diff --git a/torch/utils/data/datapipes/utils/common.py b/torch/utils/data/datapipes/utils/common.py
index 99ae0cb4cbd02..cfa6cd95b524d 100644
--- a/torch/utils/data/datapipes/utils/common.py
+++ b/torch/utils/data/datapipes/utils/common.py
@@ -213,8 +213,7 @@ def get_file_binaries_from_pathnames(pathnames: Iterable, mode: str, encoding: O

     for pathname in pathnames:
         if not isinstance(pathname, str):
-            raise TypeError("Expected string type for pathname, but got {}"
-                            .format(type(pathname)))
+            raise TypeError(f"Expected string type for pathname, but got {type(pathname)}")
         yield pathname, StreamWrapper(open(pathname, mode, encoding=encoding))
diff --git a/torch/utils/data/distributed.py b/torch/utils/data/distributed.py
index 8358204bd6237..4f99cbb3ff517 100644
--- a/torch/utils/data/distributed.py
+++ b/torch/utils/data/distributed.py
@@ -72,8 +72,7 @@ def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None,
             rank = dist.get_rank()
         if rank >= num_replicas or rank < 0:
             raise ValueError(
-                "Invalid rank {}, rank should be in the interval"
-                " [0, {}]".format(rank, num_replicas - 1))
+                f"Invalid rank {rank}, rank should be in the interval [0, {num_replicas - 1}]")
         self.dataset = dataset
         self.num_replicas = num_replicas
         self.rank = rank
diff --git a/torch/utils/data/sampler.py b/torch/utils/data/sampler.py
index 606f9ec5b6314..1e56df5adc8a4 100644
--- a/torch/utils/data/sampler.py
+++ b/torch/utils/data/sampler.py
@@ -134,12 +134,10 @@ def __init__(self, data_source: Sized, replacement: bool = False,
         self.generator = generator

         if not isinstance(self.replacement, bool):
-            raise TypeError("replacement should be a boolean value, but got "
-                            "replacement={}".format(self.replacement))
+            raise TypeError(f"replacement should be a boolean value, but got replacement={self.replacement}")

         if not isinstance(self.num_samples, int) or self.num_samples <= 0:
-            raise ValueError("num_samples should be a positive integer "
-                             "value, but got num_samples={}".format(self.num_samples))
+            raise ValueError(f"num_samples should be a positive integer value, but got num_samples={self.num_samples}")

     @property
     def num_samples(self) -> int:
@@ -218,11 +216,9 @@ def __init__(self, weights: Sequence[float], num_samples: int,
                  replacement: bool = True, generator=None) -> None:
         if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \
                 num_samples <= 0:
-            raise ValueError("num_samples should be a positive integer "
-                             "value, but got num_samples={}".format(num_samples))
+            raise ValueError(f"num_samples should be a positive integer value, but got num_samples={num_samples}")
         if not isinstance(replacement, bool):
-            raise ValueError("replacement should be a boolean value, but got "
-                             "replacement={}".format(replacement))
+            raise ValueError(f"replacement should be a boolean value, but got replacement={replacement}")

         weights_tensor = torch.as_tensor(weights, dtype=torch.double)
         if len(weights_tensor.shape) != 1:
@@ -264,11 +260,9 @@ def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int,
         # check here.
         if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
                 batch_size <= 0:
-            raise ValueError("batch_size should be a positive integer value, "
-                             "but got batch_size={}".format(batch_size))
+            raise ValueError(f"batch_size should be a positive integer value, but got batch_size={batch_size}")
         if not isinstance(drop_last, bool):
-            raise ValueError("drop_last should be a boolean value, but got "
-                             "drop_last={}".format(drop_last))
+            raise ValueError(f"drop_last should be a boolean value, but got drop_last={drop_last}")
         self.sampler = sampler
         self.batch_size = batch_size
         self.drop_last = drop_last
diff --git a/torch/utils/flop_counter.py b/torch/utils/flop_counter.py
index bbddc6735a188..06db034a8dbb6 100644
--- a/torch/utils/flop_counter.py
+++ b/torch/utils/flop_counter.py
@@ -210,7 +210,7 @@ def get_suffix_str(number):
 def convert_num_with_suffix(number, suffix):
     index = suffixes.index(suffix)
     # Divide the number by 1000^index and format it to two decimal places
-    value = "{:.3f}".format(number / (1000 ** index))
+    value = f"{number / 1000 ** index:.3f}"
     # Return the value and the suffix as a string
     return value + suffixes[index]

@@ -361,13 +361,13 @@ def process_mod(mod_name, depth):
             values.append([
                 padding + mod_name,
                 convert_num_with_suffix(total_flops, global_suffix),
-                "{:.2f}%".format(total_flops / global_flops * 100)
+                f"{total_flops / global_flops * 100:.2f}%"
             ])
             for k, v in self.flop_counts[mod_name].items():
                 values.append([
                     padding + " - " + str(k),
                     convert_num_with_suffix(v, global_suffix),
-                    "{:.2f}%".format(v / global_flops * 100)
+                    f"{v / global_flops * 100:.2f}%"
                 ])
             return values
diff --git a/torch/utils/hipify/hipify_python.py b/torch/utils/hipify/hipify_python.py
index fa800659595bd..92eccbf018efe 100755
--- a/torch/utils/hipify/hipify_python.py
+++ b/torch/utils/hipify/hipify_python.py
@@ -77,7 +77,7 @@ def __init__(self, message):
         self.message = message

     def __str__(self):
-        return "{}: {}".format("Input error", self.message)
+        return f"Input error: {self.message}"


 def openf(filename, mode):
@@ -225,7 +225,7 @@ def compute_stats(stats):
     print(", ".join(unsupported_calls))

     # Print the number of kernel launches
-    print("\nTotal number of replaced kernel launches: {:d}".format(len(stats["kernel_launches"])))
+    print(f"\nTotal number of replaced kernel launches: {len(stats['kernel_launches']):d}")


 def add_dim3(kernel_string, cuda_kernel):
@@ -531,8 +531,7 @@ def replace_extern_shared(input_string):
     """
     output_string = input_string
     output_string = RE_EXTERN_SHARED.sub(
-        lambda inp: "HIP_DYNAMIC_SHARED({} {}, {})".format(
-            inp.group(1) or "", inp.group(2), inp.group(3)), output_string)
+        lambda inp: f"HIP_DYNAMIC_SHARED({inp.group(1) or ''} {inp.group(2)}, {inp.group(3)})", output_string)

     return output_string

@@ -711,7 +710,7 @@ def _pattern(self, pData):
         if cconly:
             result += "?"
         else:
-            result = "(?:%s)?" % result
+            result = f"(?:{result})?"
         return result

     def pattern(self):
diff --git a/torch/utils/mobile_optimizer.py b/torch/utils/mobile_optimizer.py
index 66d57a2372baf..fae6efe265f98 100644
--- a/torch/utils/mobile_optimizer.py
+++ b/torch/utils/mobile_optimizer.py
@@ -54,8 +54,7 @@ def optimize_for_mobile(
             non_exist_methods.append(method)
     if non_exist_methods:
         raise AttributeError(
-            'The following methods to preserve do not exist in script_module: {}'
-            .format(', '.join(non_exist_methods)))
+            f"The following methods to preserve do not exist in script_module: {', '.join(non_exist_methods)}")

     backend = backend.lower()
     if backend == 'cpu':
diff --git a/torch/utils/tensorboard/_convert_np.py b/torch/utils/tensorboard/_convert_np.py
index 385719295e82a..4e699d90229a4 100644
--- a/torch/utils/tensorboard/_convert_np.py
+++ b/torch/utils/tensorboard/_convert_np.py
@@ -22,9 +22,7 @@ def make_np(x):
     if isinstance(x, torch.Tensor):
         return _prepare_pytorch(x)
     raise NotImplementedError(
-        "Got {}, but numpy array, torch tensor, or caffe2 blob name are expected.".format(
-            type(x)
-        )
+        f"Got {type(x)}, but numpy array, torch tensor, or caffe2 blob name are expected."
     )
diff --git a/torch/utils/tensorboard/_pytorch_graph.py b/torch/utils/tensorboard/_pytorch_graph.py
index 280b503c515c0..ccfe5d81af1ff 100644
--- a/torch/utils/tensorboard/_pytorch_graph.py
+++ b/torch/utils/tensorboard/_pytorch_graph.py
@@ -271,9 +271,7 @@ def parse(graph, trace, args=None, omit_useless_nodes=True):
                 parent_attr_key = parent.output().debugName()
                 parent_scope = attr_to_scope[parent_attr_key]
                 attr_scope = parent_scope.split("/")[-1]
-                attr_to_scope[attr_key] = "{}/{}.{}".format(
-                    parent_scope, attr_scope, attr_name
-                )
+                attr_to_scope[attr_key] = f"{parent_scope}/{attr_scope}.{attr_name}"
             else:
                 attr_to_scope[attr_key] = f"__module.{attr_name}"
             # We don't need classtype nodes; scope will provide this information
diff --git a/torch/utils/tensorboard/_utils.py b/torch/utils/tensorboard/_utils.py
index 6dfd9d5cc80fa..3715b7504ff04 100644
--- a/torch/utils/tensorboard/_utils.py
+++ b/torch/utils/tensorboard/_utils.py
@@ -95,10 +95,7 @@ def make_grid(I, ncols=8):
 def convert_to_HWC(tensor, input_format):  # tensor: numpy array
     assert len(set(input_format)) == len(
         input_format
-    ), "You can not use the same dimension shordhand twice. \
-        input_format: {}".format(
-        input_format
-    )
+    ), f"You can not use the same dimension shordhand twice. input_format: {input_format}"
     assert len(tensor.shape) == len(
         input_format
     ), "size of input tensor and input format are different. \
diff --git a/torch/utils/tensorboard/summary.py b/torch/utils/tensorboard/summary.py
index f821d95d2b99c..e07d2c6b880a0 100644
--- a/torch/utils/tensorboard/summary.py
+++ b/torch/utils/tensorboard/summary.py
@@ -199,8 +199,7 @@ def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None):
             or not all(isinstance(d, type(hparam_dict[k])) for d in v)
         ):
             raise TypeError(
-                "parameter: hparam_domain_discrete[{}] should be a list of same type as "
-                "hparam_dict[{}].".format(k, k)
+                f"parameter: hparam_domain_discrete[{k}] should be a list of same type as hparam_dict[{k}]."
             )

     hps = []
diff --git a/torch/utils/tensorboard/writer.py b/torch/utils/tensorboard/writer.py
index b592707df2c1e..16817fdda42dd 100644
--- a/torch/utils/tensorboard/writer.py
+++ b/torch/utils/tensorboard/writer.py
@@ -898,8 +898,8 @@ def add_graph(

 def _encode(rawstr):
     # I'd use urllib but, I'm unsure about the differences from python3 to python2, etc.
     retval = rawstr
-    retval = retval.replace("%", "%%%02x" % (ord("%")))
-    retval = retval.replace("/", "%%%02x" % (ord("/")))
+    retval = retval.replace("%", f"%{ord('%'):02x}")
+    retval = retval.replace("/", f"%{ord('/'):02x}")
     retval = retval.replace("\\", "%%%02x" % (ord("\\")))
     return retval

@@ -964,7 +964,7 @@ def add_embedding(
                 )
             else:
                 raise Exception(
-                    "Path: `%s` exists, but is a file. Cannot proceed." % save_path
+                    f"Path: `{save_path}` exists, but is a file. Cannot proceed."
                 )
         else:
             fs.makedirs(save_path)
diff --git a/torch/utils/viz/_cycles.py b/torch/utils/viz/_cycles.py
index 13a425cd1b828..dd481560017a5 100644
--- a/torch/utils/viz/_cycles.py
+++ b/torch/utils/viz/_cycles.py
@@ -247,15 +247,9 @@ def format_sequence(obj):
         filename = obj.f_code.co_filename
         if len(filename) > FRAME_FILENAME_LIMIT:
             filename = "..." + filename[-(FRAME_FILENAME_LIMIT - 3):]
-        return "frame\n{}:{}".format(
-            filename,
-            obj.f_lineno,
-        )
+        return f"frame\n{filename}:{obj.f_lineno}"
     else:
-        return "object\n{}.{}".format(
-            type(obj).__module__,
-            type(obj).__name__,
-        )
+        return f"object\n{type(obj).__module__}.{type(obj).__name__}"
diff --git a/torchgen/api/autograd.py b/torchgen/api/autograd.py
index b76def61c4dfd..a9cf148b77be0 100644
--- a/torchgen/api/autograd.py
+++ b/torchgen/api/autograd.py
@@ -466,9 +466,7 @@ def gen_foreach_derivativeinfo(
         DifferentiabilityInfo(
             name=foreach_function.func.name.name.base,
             func=foreach_function,
-            op="Foreach{}{}".format(
-                ref_diff_info.op, foreach_function.func.name.overload_name
-            ),
+            op=f"Foreach{ref_diff_info.op}{foreach_function.func.name.overload_name}",
             derivatives=modified_derivative_formulas,
             forward_derivatives=[],
             all_saved_inputs=tuple(set(all_saved_inputs)),
diff --git a/torchgen/selective_build/operator.py b/torchgen/selective_build/operator.py
index d7f5c56f63a60..feb4f08bb822e 100644
--- a/torchgen/selective_build/operator.py
+++ b/torchgen/selective_build/operator.py
@@ -133,10 +133,7 @@ def combine_operators(
 ) -> "SelectiveBuildOperator":
     if str(lhs.name) != str(rhs.name):
         raise Exception(
-            "Expected both arguments to have the same name, but got '{}' and '{}' instead".format(
-                str(lhs.name),
-                str(rhs.name),
-            )
+            f"Expected both arguments to have the same name, but got '{str(lhs.name)}' and '{str(rhs.name)}' instead"
         )

     return SelectiveBuildOperator(