diff --git a/.circleci/scripts/cpp_doc_push_script.sh b/.circleci/scripts/cpp_doc_push_script.sh index eb652a2c8e1c7c..41fc3298f79c67 100755 --- a/.circleci/scripts/cpp_doc_push_script.sh +++ b/.circleci/scripts/cpp_doc_push_script.sh @@ -56,7 +56,7 @@ sudo apt-get -y install doxygen # Generate ATen files pushd "${pt_checkout}" pip install -r requirements.txt -time python -m tools.codegen.gen \ +time python -m torchgen.gen \ -s aten/src/ATen \ -d build/aten/src/ATen diff --git a/.jenkins/pytorch/codegen-test.sh b/.jenkins/pytorch/codegen-test.sh index 290baa7a3b3b71..7b8d0667a86a4c 100755 --- a/.jenkins/pytorch/codegen-test.sh +++ b/.jenkins/pytorch/codegen-test.sh @@ -26,7 +26,7 @@ set -x rm -rf "$OUT" # aten codegen -python -m tools.codegen.gen \ +python -m torchgen.gen \ -d "$OUT"/torch/share/ATen # torch codegen diff --git a/BUILD.bazel b/BUILD.bazel index 06619fddcaeeb4..e1448c65a15941 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -95,7 +95,7 @@ generate_aten( aten_ufunc_generated_cuda_sources("aten/src/ATen/{}") + ["aten/src/ATen/Declarations.yaml"] ), - generator = "//tools/codegen:gen", + generator = "//torchgen:gen", ) libtorch_cpp_generated_sources = [ @@ -1345,7 +1345,7 @@ cc_library( py_binary( name = "gen_op", srcs = ["caffe2/contrib/aten/gen_op.py"], - deps = ["//tools/codegen"], + deps = ["//torchgen"], ) genrule( diff --git a/aten/src/ATen/gen_vulkan_glsl.py b/aten/src/ATen/gen_vulkan_glsl.py index d90afbf6a019a5..b43dcb6cfeff25 100644 --- a/aten/src/ATen/gen_vulkan_glsl.py +++ b/aten/src/ATen/gen_vulkan_glsl.py @@ -4,7 +4,7 @@ import glob import sys import os -from tools.codegen.code_template import CodeTemplate +from torchgen.code_template import CodeTemplate H_NAME = "glsl.h" CPP_NAME = "glsl.cpp" diff --git a/aten/src/ATen/gen_vulkan_spv.py b/aten/src/ATen/gen_vulkan_spv.py index eb3542410a20ea..0d0906ded60e78 100644 --- a/aten/src/ATen/gen_vulkan_spv.py +++ b/aten/src/ATen/gen_vulkan_spv.py @@ -6,7 +6,7 @@ import os import sys import subprocess -from tools.codegen.code_template import CodeTemplate +from torchgen.code_template import CodeTemplate H_NAME = "spv.h" CPP_NAME = "spv.cpp" diff --git a/aten/src/ATen/native/README.md b/aten/src/ATen/native/README.md index a2b50e3ee467df..3c10afef14fa16 100644 --- a/aten/src/ATen/native/README.md +++ b/aten/src/ATen/native/README.md @@ -291,7 +291,7 @@ If two backends have the same dispatch function, you can write `CPU, CUDA: func` to reuse the same function name in both cases. Available backend options can be found by searching `dispatch_keys` in -[codegen](https://github.com/pytorch/pytorch/blob/master/tools/codegen/gen.py). +[codegen](https://github.com/pytorch/pytorch/blob/master/torchgen/gen.py). There are also two special "generic" backends: - `CompositeExplicitAutograd` (previously known as `DefaultBackend`): diff --git a/c10/core/DispatchKey.h b/c10/core/DispatchKey.h index 88492c751935ed..942860566f719b 100644 --- a/c10/core/DispatchKey.h +++ b/c10/core/DispatchKey.h @@ -97,7 +97,7 @@ enum class BackendComponent : uint8_t { // See Note [DispatchKeySet Internal Representation] for more details. 
// -// NOTE: Keep the list in sync with `DispatchKey` in tools/codegen/model.py +// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py enum class DispatchKey : uint16_t { // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt index 5045432e7130f3..4d80f6d1722bdf 100644 --- a/caffe2/CMakeLists.txt +++ b/caffe2/CMakeLists.txt @@ -63,7 +63,7 @@ if(INTERN_BUILD_ATEN_OPS) set(CMAKE_POSITION_INDEPENDENT_CODE ${__caffe2_CMAKE_POSITION_INDEPENDENT_CODE}) # Generate the headers wrapped by our operator - file(GLOB_RECURSE all_python "${PROJECT_SOURCE_DIR}/tools/codegen/*.py") + file(GLOB_RECURSE all_python "${PROJECT_SOURCE_DIR}/torchgen/*.py") add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/contrib/aten/aten_op.h COMMAND "${PYTHON_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/contrib/aten/gen_op.py @@ -458,10 +458,10 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE) "${TOOLS_PATH}/autograd/gen_variable_type.py" "${TOOLS_PATH}/autograd/gen_inplace_or_view_type.py" "${TOOLS_PATH}/autograd/load_derivatives.py" - "${TOOLS_PATH}/codegen/gen_backend_stubs.py" - "${TOOLS_PATH}/codegen/gen_lazy_tensor.py" - "${TOOLS_PATH}/codegen/api/lazy.py" - "${TOOLS_PATH}/codegen/dest/lazy_ir.py" + "${TORCH_ROOT}/torchgen/gen_backend_stubs.py" + "${TORCH_ROOT}/torchgen/gen_lazy_tensor.py" + "${TORCH_ROOT}/torchgen/api/lazy.py" + "${TORCH_ROOT}/torchgen/dest/lazy_ir.py" WORKING_DIRECTORY "${TORCH_ROOT}") diff --git a/caffe2/contrib/aten/gen_op.py b/caffe2/contrib/aten/gen_op.py index 93d4bad29f9244..55f1faba2750b6 100755 --- a/caffe2/contrib/aten/gen_op.py +++ b/caffe2/contrib/aten/gen_op.py @@ -37,9 +37,9 @@ raise ValueError('aten_root ({}) does not exist'.format( args.aten_root)) sys.path.insert(0, os.path.join(args.aten_root, '..')) - from tools.codegen.code_template import CodeTemplate as CT + from torchgen.code_template import CodeTemplate as CT else: - from tools.codegen.code_template import CodeTemplate as CT + from torchgen.code_template import CodeTemplate as CT OP_TEMPLATE = CT.from_file( os.path.join(args.template_dir, 'aten_op_template.h')) diff --git a/cmake/Codegen.cmake b/cmake/Codegen.cmake index 952f1e92d5bb7f..aa712b7dfa4bf9 100644 --- a/cmake/Codegen.cmake +++ b/cmake/Codegen.cmake @@ -67,7 +67,7 @@ if(INTERN_BUILD_ATEN_OPS) set_source_files_properties(${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen/MapAllocator.cpp PROPERTIES COMPILE_FLAGS "-fno-openmp") endif() - file(GLOB_RECURSE all_python "${CMAKE_CURRENT_LIST_DIR}/../tools/codegen/*.py") + file(GLOB_RECURSE all_python "${CMAKE_CURRENT_LIST_DIR}/../torchgen/*.py") set(GEN_ROCM_FLAG) if(USE_ROCM) @@ -148,7 +148,7 @@ if(INTERN_BUILD_ATEN_OPS) endif() set(GEN_COMMAND - "${PYTHON_EXECUTABLE}" -m tools.codegen.gen + "${PYTHON_EXECUTABLE}" -m torchgen.gen --source-path ${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen --install_dir ${CMAKE_BINARY_DIR}/aten/src/ATen ${GEN_PER_OPERATOR_FLAG} diff --git a/docs/cpp/source/check-doxygen.sh b/docs/cpp/source/check-doxygen.sh index 28c7e5b81ace98..a0c85ece54b134 100755 --- a/docs/cpp/source/check-doxygen.sh +++ b/docs/cpp/source/check-doxygen.sh @@ -16,7 +16,7 @@ pushd "$(dirname "$0")/../../.." 
cp torch/_utils_internal.py tools/shared -python -m tools.codegen.gen +python -m torchgen.gen python tools/setup_helpers/generate_code.py \ --native-functions-path aten/src/ATen/native/native_functions.yaml diff --git a/test/jit/fixtures_srcs/generate_models.py b/test/jit/fixtures_srcs/generate_models.py index 980e7dd0324e3d..e00153745138ca 100644 --- a/test/jit/fixtures_srcs/generate_models.py +++ b/test/jit/fixtures_srcs/generate_models.py @@ -52,7 +52,7 @@ def div_Tensor_0_3(self: Tensor, other: Tensor) -> Tensor: fbcode/caffe2/torch/csrc/jit/mobile/upgrader_mobile.cpp ``` -python pytorch/tools/codegen/operator_versions/gen_mobile_upgraders.py +python pytorch/torchgen/operator_versions/gen_mobile_upgraders.py ``` 4. Generate the test to cover upgrader. diff --git a/test/mobile/test_upgrader_codegen.py b/test/mobile/test_upgrader_codegen.py index 5a09ad8a877d3b..5ccf9a020a5b54 100644 --- a/test/mobile/test_upgrader_codegen.py +++ b/test/mobile/test_upgrader_codegen.py @@ -2,7 +2,7 @@ from torch.testing._internal.common_utils import TestCase, run_tests -from tools.codegen.operator_versions.gen_mobile_upgraders import ( +from torchgen.operator_versions.gen_mobile_upgraders import ( sort_upgrader, write_cpp, ) diff --git a/tools/autograd/build.bzl b/tools/autograd/build.bzl index 2f7c741b8527c9..a21ca870708c7d 100644 --- a/tools/autograd/build.bzl +++ b/tools/autograd/build.bzl @@ -9,6 +9,6 @@ def define_targets(rules): visibility = ["//:__subpackages__"], deps = [ rules.requirement("PyYAML"), - "//tools/codegen", + "//torchgen:torchgen", ], ) diff --git a/tools/autograd/context.py b/tools/autograd/context.py index cc357f9c7d4175..af1a6025ed8da8 100644 --- a/tools/autograd/context.py +++ b/tools/autograd/context.py @@ -1,6 +1,6 @@ -from tools.codegen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI -from tools.codegen.context import native_function_manager -from tools.codegen.utils import T +from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI +from torchgen.context import native_function_manager +from torchgen.utils import T import functools from typing import Callable diff --git a/tools/autograd/gen_annotated_fn_args.py b/tools/autograd/gen_annotated_fn_args.py index 8898bb71251f12..d020f1f8e685bf 100644 --- a/tools/autograd/gen_annotated_fn_args.py +++ b/tools/autograd/gen_annotated_fn_args.py @@ -20,11 +20,11 @@ from typing import Dict, List, Any -from tools.codegen.gen import parse_native_yaml -from tools.codegen.utils import FileManager -from tools.codegen.context import with_native_function -from tools.codegen.model import BaseOperatorName, NativeFunction -import tools.codegen.api.python as python +from torchgen.gen import parse_native_yaml +from torchgen.utils import FileManager +from torchgen.context import with_native_function +from torchgen.model import BaseOperatorName, NativeFunction +import torchgen.api.python as python from .gen_python_functions import ( should_generate_py_binding, is_py_torch_function, diff --git a/tools/autograd/gen_autograd.py b/tools/autograd/gen_autograd.py index c6b2b610ffc202..5de8f2ad3a4147 100644 --- a/tools/autograd/gen_autograd.py +++ b/tools/autograd/gen_autograd.py @@ -24,13 +24,13 @@ import argparse import os -from tools.codegen.api import cpp -from tools.codegen.api.autograd import ( +from torchgen.api import cpp +from torchgen.api.autograd import ( match_differentiability_info, NativeFunctionWithDifferentiabilityInfo, ) -from tools.codegen.gen import parse_native_yaml -from 
tools.codegen.selective_build.selector import SelectiveBuilder +from torchgen.gen import parse_native_yaml +from torchgen.selective_build.selector import SelectiveBuilder from typing import List from . import gen_python_functions from .gen_autograd_functions import ( diff --git a/tools/autograd/gen_autograd_functions.py b/tools/autograd/gen_autograd_functions.py index 35657b5b59966f..3e1e55b82b2fcc 100644 --- a/tools/autograd/gen_autograd_functions.py +++ b/tools/autograd/gen_autograd_functions.py @@ -8,14 +8,14 @@ from typing import List, Sequence, Tuple -from tools.codegen.api.autograd import ( +from torchgen.api.autograd import ( Derivative, DifferentiabilityInfo, SavedAttribute, uses_retain_variables, uses_single_grad, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( Binding, BaseCType, OptionalCType, @@ -32,9 +32,9 @@ ArrayRefCType, optionalIntArrayRefT, ) -from tools.codegen.code_template import CodeTemplate -from tools.codegen.utils import FileManager -from tools.codegen.model import Argument +from torchgen.code_template import CodeTemplate +from torchgen.utils import FileManager +from torchgen.model import Argument FUNCTION_DECLARATION = CodeTemplate( """\ diff --git a/tools/autograd/gen_inplace_or_view_type.py b/tools/autograd/gen_inplace_or_view_type.py index b0e6b1209ce475..94c039eb4b440f 100644 --- a/tools/autograd/gen_inplace_or_view_type.py +++ b/tools/autograd/gen_inplace_or_view_type.py @@ -4,13 +4,13 @@ # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp # The fallback is expected to mimick this codegen, so we should keep the two in sync. -from tools.codegen.api import cpp -from tools.codegen.api.autograd import ( +from torchgen.api import cpp +from torchgen.api.autograd import ( NativeFunctionWithDifferentiabilityInfo, gen_differentiable_outputs, dispatch_strategy, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( Binding, DispatcherSignature, CType, @@ -21,9 +21,9 @@ intArrayRefT, symIntArrayRefT, ) -from tools.codegen.code_template import CodeTemplate -from tools.codegen.context import with_native_function -from tools.codegen.model import ( +from torchgen.code_template import CodeTemplate +from torchgen.context import with_native_function +from torchgen.model import ( Type, NativeFunction, SelfArgument, @@ -32,7 +32,7 @@ is_foreach_op, ) from typing import List, Optional, Sequence, Tuple, Dict -from tools.codegen.utils import FileManager +from torchgen.utils import FileManager from .context import with_native_function_with_differentiability_info from .gen_trace_type import ( MANUAL_AUTOGRAD, diff --git a/tools/autograd/gen_python_functions.py b/tools/autograd/gen_python_functions.py index 6f31c09d3e08c9..71a14450efcc20 100644 --- a/tools/autograd/gen_python_functions.py +++ b/tools/autograd/gen_python_functions.py @@ -37,10 +37,10 @@ from .gen_trace_type import should_trace -from tools.codegen.code_template import CodeTemplate -from tools.codegen.api import cpp -from tools.codegen.api.types import CppSignatureGroup -from tools.codegen.api.python import ( +from torchgen.code_template import CodeTemplate +from torchgen.api import cpp +from torchgen.api.types import CppSignatureGroup +from torchgen.api.python import ( PythonArgument, PythonSignature, PythonSignatureDeprecated, @@ -57,16 +57,16 @@ namedtuple_fieldnames, signature, ) -from tools.codegen.gen import cpp_string, parse_native_yaml -from tools.codegen.context import with_native_function -from tools.codegen.model import ( +from 
torchgen.gen import cpp_string, parse_native_yaml +from torchgen.context import with_native_function +from torchgen.model import ( Argument, BaseOperatorName, NativeFunction, Type, Variant, ) -from tools.codegen.utils import split_name_params, YamlLoader, FileManager +from torchgen.utils import split_name_params, YamlLoader, FileManager from typing import Dict, Optional, List, Tuple, Set, Sequence, Callable diff --git a/tools/autograd/gen_trace_type.py b/tools/autograd/gen_trace_type.py index c1e72ad9ecc351..8072c6cad2d958 100644 --- a/tools/autograd/gen_trace_type.py +++ b/tools/autograd/gen_trace_type.py @@ -1,12 +1,12 @@ import itertools from typing import List, Sequence, Union, Dict -from tools.codegen.api.types import DispatcherSignature -from tools.codegen.api import cpp -from tools.codegen.code_template import CodeTemplate -from tools.codegen.context import with_native_function -from tools.codegen.utils import FileManager -from tools.codegen.model import ( +from torchgen.api.types import DispatcherSignature +from torchgen.api import cpp +from torchgen.code_template import CodeTemplate +from torchgen.context import with_native_function +from torchgen.utils import FileManager +from torchgen.model import ( Argument, NativeFunction, SchemaKind, diff --git a/tools/autograd/gen_variable_factories.py b/tools/autograd/gen_variable_factories.py index a1a57cc55cba21..c087d1e01a428e 100644 --- a/tools/autograd/gen_variable_factories.py +++ b/tools/autograd/gen_variable_factories.py @@ -5,13 +5,13 @@ import re from typing import Optional, List -from tools.codegen.api.types import CppSignatureGroup -from tools.codegen.api import cpp -import tools.codegen.api.python as python -from tools.codegen.gen import parse_native_yaml -from tools.codegen.context import with_native_function -from tools.codegen.utils import mapMaybe, FileManager -from tools.codegen.model import NativeFunction, TensorOptionsArguments, Variant +from torchgen.api.types import CppSignatureGroup +from torchgen.api import cpp +import torchgen.api.python as python +from torchgen.gen import parse_native_yaml +from torchgen.context import with_native_function +from torchgen.utils import mapMaybe, FileManager +from torchgen.model import NativeFunction, TensorOptionsArguments, Variant OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>") TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)") diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py index 8036f641e9aadc..954c4692c4a743 100644 --- a/tools/autograd/gen_variable_type.py +++ b/tools/autograd/gen_variable_type.py @@ -52,7 +52,7 @@ AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( Binding, DispatcherSignature, BaseCType, @@ -68,7 +68,7 @@ TupleCType, VectorCType, ) -from tools.codegen.api.autograd import ( +from torchgen.api.autograd import ( DifferentiableInput, NativeFunctionWithDifferentiabilityInfo, SavedAttribute, @@ -76,11 +76,11 @@ gen_differentiable_outputs, is_differentiable, ) -from tools.codegen.api import cpp -from tools.codegen.code_template import CodeTemplate -from tools.codegen.context import native_function_manager, with_native_function -from tools.codegen.utils import mapMaybe, FileManager -from tools.codegen.model import ( +from torchgen.api import cpp +from torchgen.code_template import CodeTemplate +from torchgen.context import native_function_manager, with_native_function +from torchgen.utils import mapMaybe, FileManager +from torchgen.model import ( Argument, 
NativeFunction, SchemaKind, diff --git a/tools/autograd/load_derivatives.py b/tools/autograd/load_derivatives.py index 084128c700b8ee..5ba8dd7c4d720b 100644 --- a/tools/autograd/load_derivatives.py +++ b/tools/autograd/load_derivatives.py @@ -1,19 +1,19 @@ # Parses derivatives.yaml into autograd functions # # Each autograd function is represented by `DifferentiabilityInfo` containing -# a list of `Derivative`. See `tools.codegen.api.autograd` for the data models. +# a list of `Derivative`. See `torchgen.api.autograd` for the data models. from collections import defaultdict import re from typing import Counter, Sequence, Any, Tuple, List, Set, Dict, Match, Optional import yaml -from tools.codegen.api.autograd import ( +from torchgen.api.autograd import ( Derivative, DifferentiabilityInfo, SavedAttribute, ForwardDerivative, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( Binding, CppSignatureGroup, NamedCType, @@ -31,10 +31,10 @@ OptionalCType, stringT, ) -from tools.codegen.api import cpp -from tools.codegen.gen import parse_native_yaml, get_grouped_by_view_native_functions -from tools.codegen.context import with_native_function -from tools.codegen.model import ( +from torchgen.api import cpp +from torchgen.gen import parse_native_yaml, get_grouped_by_view_native_functions +from torchgen.context import with_native_function +from torchgen.model import ( FunctionSchema, NativeFunction, Variant, @@ -42,7 +42,7 @@ NativeFunctionsViewGroup, OperatorName, ) -from tools.codegen.utils import IDENT_REGEX, split_name_params, YamlLoader, concatMap +from torchgen.utils import IDENT_REGEX, split_name_params, YamlLoader, concatMap _GLOBAL_LOAD_DERIVATIVE_CACHE = {} diff --git a/tools/code_analyzer/gen_oplist.py b/tools/code_analyzer/gen_oplist.py index ea584885f201c3..b5d31b92216747 100644 --- a/tools/code_analyzer/gen_oplist.py +++ b/tools/code_analyzer/gen_oplist.py @@ -7,7 +7,7 @@ from typing import Set, List, Any import yaml -from tools.codegen.selective_build.selector import ( +from torchgen.selective_build.selector import ( combine_selective_builders, SelectiveBuilder, ) diff --git a/tools/jit/gen_unboxing.py b/tools/jit/gen_unboxing.py index e5ed6086688f6a..84090840ec5f85 100644 --- a/tools/jit/gen_unboxing.py +++ b/tools/jit/gen_unboxing.py @@ -3,15 +3,15 @@ import os import pathlib from dataclasses import dataclass -from tools.codegen.api import unboxing -from tools.codegen.api.translate import translate -from tools.codegen.api.types import CppSignatureGroup -from tools.codegen.api.unboxing import convert_arguments -from tools.codegen.context import method_with_native_function -from tools.codegen.gen import parse_native_yaml, cpp_string -from tools.codegen.model import NativeFunction, NativeFunctionsGroup, Variant -from tools.codegen.selective_build.selector import SelectiveBuilder -from tools.codegen.utils import Target, FileManager, mapMaybe, make_file_manager +from torchgen.api import unboxing +from torchgen.api.translate import translate +from torchgen.api.types import CppSignatureGroup +from torchgen.api.unboxing import convert_arguments +from torchgen.context import method_with_native_function +from torchgen.gen import parse_native_yaml, cpp_string +from torchgen.model import NativeFunction, NativeFunctionsGroup, Variant +from torchgen.selective_build.selector import SelectiveBuilder +from torchgen.utils import Target, FileManager, mapMaybe, make_file_manager from typing import Union, Sequence from typing_extensions import Literal diff --git 
a/tools/linter/clang_tidy/generate_build_files.py b/tools/linter/clang_tidy/generate_build_files.py index dc9033923fc0ee..5b900b1e6a5214 100644 --- a/tools/linter/clang_tidy/generate_build_files.py +++ b/tools/linter/clang_tidy/generate_build_files.py @@ -43,7 +43,7 @@ def run_autogen() -> None: [ sys.executable, "-m", - "tools.codegen.gen", + "torchgen.gen", "-s", "aten/src/ATen", "-d", diff --git a/tools/lite_interpreter/gen_selected_mobile_ops_header.py b/tools/lite_interpreter/gen_selected_mobile_ops_header.py index 6169baa9a0047a..37cd9e6903bf5d 100644 --- a/tools/lite_interpreter/gen_selected_mobile_ops_header.py +++ b/tools/lite_interpreter/gen_selected_mobile_ops_header.py @@ -2,8 +2,8 @@ import argparse import os from typing import Set -from tools.codegen.selective_build.selector import SelectiveBuilder -from tools.codegen.code_template import CodeTemplate +from torchgen.selective_build.selector import SelectiveBuilder +from torchgen.code_template import CodeTemplate import yaml diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py index f85d99b43ed2c0..ec27f3b8a1b233 100644 --- a/tools/pyi/gen_pyi.py +++ b/tools/pyi/gen_pyi.py @@ -2,14 +2,14 @@ import collections from pprint import pformat -from tools.codegen.model import Variant -from tools.codegen.api.python import ( +from torchgen.model import Variant +from torchgen.api.python import ( PythonSignatureGroup, PythonSignatureNativeFunctionPair, returns_named_tuple_pyi, ) -from tools.codegen.gen import parse_native_yaml -from tools.codegen.utils import FileManager +from torchgen.gen import parse_native_yaml +from torchgen.utils import FileManager from typing import Sequence, List, Dict from tools.autograd.gen_python_functions import ( diff --git a/tools/setup_helpers/BUILD.bazel b/tools/setup_helpers/BUILD.bazel index 0e203061919e0d..28dcd1b5b47c9f 100644 --- a/tools/setup_helpers/BUILD.bazel +++ b/tools/setup_helpers/BUILD.bazel @@ -3,7 +3,7 @@ py_binary( srcs = ["generate_code.py"], deps = [ "//tools/autograd", - "//tools/codegen", + "//torchgen", ], visibility = ["//:__pkg__"], ) diff --git a/tools/setup_helpers/build.bzl b/tools/setup_helpers/build.bzl index 73fd86de3aa8d8..c5be13e4603b47 100644 --- a/tools/setup_helpers/build.bzl +++ b/tools/setup_helpers/build.bzl @@ -6,7 +6,7 @@ def define_targets(rules): deps = [ rules.requirement("PyYAML"), "//tools/autograd", - "//tools/codegen", + "//torchgen", ], ) diff --git a/tools/setup_helpers/gen.py b/tools/setup_helpers/gen.py index bdb52ee44efbe1..3ca9a878790602 100644 --- a/tools/setup_helpers/gen.py +++ b/tools/setup_helpers/gen.py @@ -6,6 +6,6 @@ root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.insert(0, root) -import tools.codegen.gen +import torchgen.gen -tools.codegen.gen.main() +torchgen.gen.main() diff --git a/tools/setup_helpers/generate_code.py b/tools/setup_helpers/generate_code.py index 2b279dc4570b84..3a218e3c00f167 100644 --- a/tools/setup_helpers/generate_code.py +++ b/tools/setup_helpers/generate_code.py @@ -38,7 +38,7 @@ def generate_code( ) -> None: from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python from tools.autograd.gen_annotated_fn_args import gen_annotated - from tools.codegen.selective_build.selector import SelectiveBuilder + from torchgen.selective_build.selector import SelectiveBuilder # Build ATen based Variable classes if install_dir is None: @@ -98,7 +98,7 @@ def get_selector_from_legacy_operator_selection_list( is_root_operator = True is_used_for_training = True - from 
tools.codegen.selective_build.selector import SelectiveBuilder + from torchgen.selective_build.selector import SelectiveBuilder selector = SelectiveBuilder.from_legacy_op_registration_allow_list( selected_op_list, @@ -116,7 +116,7 @@ def get_selector( # cwrap depends on pyyaml, so we can't import it earlier root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.insert(0, root) - from tools.codegen.selective_build.selector import SelectiveBuilder + from torchgen.selective_build.selector import SelectiveBuilder assert not ( selected_op_list_path is not None and operators_yaml_path is not None @@ -203,8 +203,8 @@ def main() -> None: assert os.path.isfile( ts_native_functions ), f"Unable to access {ts_native_functions}" - from tools.codegen.gen_lazy_tensor import run_gen_lazy_tensor - from tools.codegen.dest.lazy_ir import GenTSLazyIR + from torchgen.gen_lazy_tensor import run_gen_lazy_tensor + from torchgen.dest.lazy_ir import GenTSLazyIR run_gen_lazy_tensor( aten_path=aten_path, diff --git a/tools/test/test_codegen.py b/tools/test/test_codegen.py index 4e4842418b0bd7..a35a65be2d1b9c 100644 --- a/tools/test/test_codegen.py +++ b/tools/test/test_codegen.py @@ -4,12 +4,12 @@ from tools.autograd import gen_autograd_functions from tools.autograd import load_derivatives -import tools.codegen.model +import torchgen.model class TestCreateDerivative(unittest.TestCase): def test_named_grads(self) -> None: - schema = tools.codegen.model.FunctionSchema.parse( + schema = torchgen.model.FunctionSchema.parse( "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)" ) native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema) @@ -24,7 +24,7 @@ def test_named_grads(self) -> None: def test_non_differentiable_output(self) -> None: specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)" - schema = tools.codegen.model.FunctionSchema.parse(specification) + schema = torchgen.model.FunctionSchema.parse(specification) native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema) differentiability_info = load_derivatives.create_differentiability_info( @@ -46,7 +46,7 @@ def test_non_differentiable_output(self) -> None: ) def test_indexed_grads(self) -> None: - schema = tools.codegen.model.FunctionSchema.parse( + schema = torchgen.model.FunctionSchema.parse( "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)" ) native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema) @@ -61,7 +61,7 @@ def test_indexed_grads(self) -> None: def test_named_grads_and_indexed_grads(self) -> None: specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)" - schema = tools.codegen.model.FunctionSchema.parse(specification) + schema = torchgen.model.FunctionSchema.parse(specification) native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema) with self.assertRaisesRegex( @@ -84,7 +84,7 @@ def test_named_grads_and_indexed_grads(self) -> None: class TestGenAutogradFunctions(unittest.TestCase): def test_non_differentiable_output_invalid_type(self) -> None: specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)" - schema = tools.codegen.model.FunctionSchema.parse(specification) + schema = torchgen.model.FunctionSchema.parse(specification) native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema) differentiability_info = load_derivatives.create_differentiability_info( @@ -107,7 +107,7 @@ def test_non_differentiable_output_invalid_type(self) -> None: def 
test_non_differentiable_output_output_differentiability(self) -> None: specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y, Tensor z)" - schema = tools.codegen.model.FunctionSchema.parse(specification) + schema = torchgen.model.FunctionSchema.parse(specification) native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema) differentiability_info = load_derivatives.create_differentiability_info( @@ -132,8 +132,8 @@ def test_non_differentiable_output_output_differentiability(self) -> None: # Represents the most basic NativeFunction. Use dataclasses.replace() # to edit for use. -DEFAULT_NATIVE_FUNCTION, _ = tools.codegen.model.NativeFunction.from_yaml( - {"func": "func() -> bool"}, loc=tools.codegen.model.Location(__file__, 1) +DEFAULT_NATIVE_FUNCTION, _ = torchgen.model.NativeFunction.from_yaml( + {"func": "func() -> bool"}, loc=torchgen.model.Location(__file__, 1) ) diff --git a/tools/test/test_codegen_model.py b/tools/test/test_codegen_model.py index 59f9563016ec71..eb597152b44b96 100644 --- a/tools/test/test_codegen_model.py +++ b/tools/test/test_codegen_model.py @@ -5,10 +5,10 @@ import yaml import textwrap -from tools.codegen.model import NativeFunctionsGroup, DispatchKey -import tools.codegen.dest as dest -import tools.codegen.gen as gen -from tools.codegen.gen import LineLoader, parse_native_yaml_struct +from torchgen.model import NativeFunctionsGroup, DispatchKey +import torchgen.dest as dest +import torchgen.gen as gen +from torchgen.gen import LineLoader, parse_native_yaml_struct class TestCodegenModel(expecttest.TestCase): diff --git a/tools/test/test_gen_backend_stubs.py b/tools/test/test_gen_backend_stubs.py index 0024737d8c85be..168ae8b1d7c738 100644 --- a/tools/test/test_gen_backend_stubs.py +++ b/tools/test/test_gen_backend_stubs.py @@ -5,11 +5,11 @@ import unittest import expecttest -from tools.codegen.gen_backend_stubs import run -from tools.codegen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE # noqa: F401 +from torchgen.gen_backend_stubs import run +from torchgen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE # noqa: F401 path = os.path.dirname(os.path.realpath(__file__)) -gen_backend_stubs_path = os.path.join(path, "../tools/codegen/gen_backend_stubs.py") +gen_backend_stubs_path = os.path.join(path, "../torchgen/gen_backend_stubs.py") # gen_backend_stubs.py is an integration point that is called directly by external backends. # The tests here are to confirm that badly formed inputs result in reasonable error messages. diff --git a/torch/csrc/jit/operator_upgraders/README.md b/torch/csrc/jit/operator_upgraders/README.md index 544185a6c20dc5..084e6688f148ef 100644 --- a/torch/csrc/jit/operator_upgraders/README.md +++ b/torch/csrc/jit/operator_upgraders/README.md @@ -145,7 +145,7 @@ When making changes to the operators, the first thing to identify is if it's BC/ 5. After [rebuilding PyTorch](https://github.com/pytorch/pytorch#from-source), run the following command to auto update the file [`torch/csrc/jit/mobile/upgrader_mobile.cpp`](https://github.com/pytorch/pytorch/blob/8757e21c6a4fc00e83539aa7f9c28eb11eff53c1/torch/csrc/jit/mobile/upgrader_mobile.cpp). After rebuild PyTorch from source (`python setup.py`), run ``` - python pytorch/tools/codegen/operator_versions/gen_mobile_upgraders.py + python pytorch/torchgen/operator_versions/gen_mobile_upgraders.py ``` 6. Add a test. With the model generated from step 1, you will need to add tests in `test/test_save_load_for_op_versions.py`. 
Following is an example to write a test diff --git a/torch/csrc/jit/runtime/decomposition_registry_util.cpp b/torch/csrc/jit/runtime/decomposition_registry_util.cpp index e6fe44b5fdccc0..da972bfce4f808 100644 --- a/torch/csrc/jit/runtime/decomposition_registry_util.cpp +++ b/torch/csrc/jit/runtime/decomposition_registry_util.cpp @@ -3,7 +3,7 @@ * @generated * This is an auto-generated file. Please do not modify it by hand. * To re-generate, please run: - * cd ~/pytorch && python tools/codegen/decompositions/gen_jit_decompositions.py + * cd ~/pytorch && python torchgen/decompositions/gen_jit_decompositions.py */ #include #include diff --git a/torch/csrc/jit/tensorexpr/codegen_external.py b/torch/csrc/jit/tensorexpr/codegen_external.py index fbde2212de0d49..6b9fac58f7570e 100644 --- a/torch/csrc/jit/tensorexpr/codegen_external.py +++ b/torch/csrc/jit/tensorexpr/codegen_external.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 import argparse -from tools.codegen.gen import parse_native_yaml, FileManager -import tools.codegen.model as model +from torchgen.gen import parse_native_yaml, FileManager +import torchgen.model as model def num_leading_spaces(line: str) -> int: return len(line) - len(line.lstrip()) diff --git a/torch/csrc/jit/tensorexpr/external_functions_codegen.cpp b/torch/csrc/jit/tensorexpr/external_functions_codegen.cpp index 29f29a28e6db68..5e049b397d2de9 100644 --- a/torch/csrc/jit/tensorexpr/external_functions_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/external_functions_codegen.cpp @@ -1,4 +1,4 @@ -// @generated by tools/codegen/gen.py from +// @generated by torchgen/gen.py from // external_functions_codegen_template.cpp #include diff --git a/torch/csrc/lazy/generated/README.md b/torch/csrc/lazy/generated/README.md index 6712a90afea2a3..0a0c1b0f4d0c2d 100644 --- a/torch/csrc/lazy/generated/README.md +++ b/torch/csrc/lazy/generated/README.md @@ -3,7 +3,7 @@ This folder contains generated sources for the lazy torchscript backend. The main input file that drives which operators get codegen support for torchscript backend is [../../../../aten/src/ATen/native/ts_native_functions.yaml](../../../../aten/src/ATen/native/ts_native_functions.yaml) -The code generator lives at `tools/codegen/gen_lazy_tensor.py`. +The code generator lives at `torchgen/gen_lazy_tensor.py`. It is called automatically by the torch autograd codegen (`tools/setup_helpers/generate_code.py`) as a part of the build process in OSS builds (CMake/Bazel) and Buck. @@ -12,7 +12,7 @@ External backends (e.g. torch/xla) call `gen_lazy_tensor.py` directly, and feed it command line args indicating where the output files should go. 
For more information on codegen, see these resources: -* Info about lazy tensor codegen: [gen_lazy_tensor.py docs](../../../../tools/codegen/gen_lazy_tensor.py) +* Info about lazy tensor codegen: [gen_lazy_tensor.py docs](../../../../torchgen/gen_lazy_tensor.py) * Lazy TorchScript backend native functions: [ts_native_functions.yaml](../../../../aten/src/ATen/native/ts_native_functions.yaml) * Source of truth for native func definitions [ATen native_functions.yaml](../../../../aten/src/ATen/native/native_functions.yaml) * Info about native functions [ATen nativefunc README.md](../../../../aten/src/ATen/native/README.md) diff --git a/tools/codegen/BUILD.bazel b/torchgen/BUILD.bazel similarity index 100% rename from tools/codegen/BUILD.bazel rename to torchgen/BUILD.bazel diff --git a/tools/codegen/__init__.py b/torchgen/__init__.py similarity index 100% rename from tools/codegen/__init__.py rename to torchgen/__init__.py diff --git a/tools/codegen/api/__init__.py b/torchgen/api/__init__.py similarity index 100% rename from tools/codegen/api/__init__.py rename to torchgen/api/__init__.py diff --git a/tools/codegen/api/autograd.py b/torchgen/api/autograd.py similarity index 99% rename from tools/codegen/api/autograd.py rename to torchgen/api/autograd.py index 15df49fd0ebe9e..01875dcb006c44 100644 --- a/tools/codegen/api/autograd.py +++ b/torchgen/api/autograd.py @@ -2,15 +2,15 @@ import re from typing import Optional, Sequence, Set, List, Tuple, Match -from tools.codegen.api import cpp -from tools.codegen.api.types import Binding, NamedCType -from tools.codegen.model import ( +from torchgen.api import cpp +from torchgen.api.types import Binding, NamedCType +from torchgen.model import ( NativeFunction, Type, SchemaKind, NativeFunctionsViewGroup, ) -from tools.codegen.utils import IDENT_REGEX +from torchgen.utils import IDENT_REGEX # Represents a saved attribute involved in backward calculation. 
# Note that it can be a derived property of an input argument, e.g.: diff --git a/tools/codegen/api/cpp.py b/torchgen/api/cpp.py similarity index 98% rename from tools/codegen/api/cpp.py rename to torchgen/api/cpp.py index 37034c5e164f23..0fd7323e3b9b8f 100644 --- a/tools/codegen/api/cpp.py +++ b/torchgen/api/cpp.py @@ -1,4 +1,4 @@ -from tools.codegen.model import ( +from torchgen.model import ( Argument, Arguments, BaseTy, @@ -12,7 +12,7 @@ TensorOptionsArguments, Type, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( ArgName, BaseCType, Binding, @@ -40,8 +40,8 @@ tensorOptionsT, symIntArrayRefT, ) -from tools.codegen import local -from tools.codegen.utils import assert_never +from torchgen import local +from torchgen.utils import assert_never from typing import Optional, Sequence, Union, List, Set # This file describes the translation of JIT schema to the public C++ diff --git a/tools/codegen/api/dispatcher.py b/torchgen/api/dispatcher.py similarity index 93% rename from tools/codegen/api/dispatcher.py rename to torchgen/api/dispatcher.py index d6059862426988..ad1f17f7194037 100644 --- a/tools/codegen/api/dispatcher.py +++ b/torchgen/api/dispatcher.py @@ -1,4 +1,4 @@ -from tools.codegen.model import ( +from torchgen.model import ( Argument, FunctionSchema, Return, @@ -7,9 +7,9 @@ Type, ) -from tools.codegen.api.types import ArgName, Binding, NamedCType, CType -from tools.codegen.api import cpp -from tools.codegen.utils import concatMap, assert_never +from torchgen.api.types import ArgName, Binding, NamedCType, CType +from torchgen.api import cpp +from torchgen.utils import concatMap, assert_never import itertools from typing import Sequence, List, Union diff --git a/tools/codegen/api/functionalization.py b/torchgen/api/functionalization.py similarity index 98% rename from tools/codegen/api/functionalization.py rename to torchgen/api/functionalization.py index 535bcb301a768d..22ce2c3c4d00a0 100644 --- a/tools/codegen/api/functionalization.py +++ b/torchgen/api/functionalization.py @@ -1,11 +1,11 @@ -from tools.codegen.model import ( +from torchgen.model import ( FunctionSchema, BaseTy, BaseType, NativeFunctionsViewGroup, Argument, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( Binding, NamedCType, ConstRefCType, @@ -15,7 +15,7 @@ longT, boolT, ) -from tools.codegen.api import dispatcher +from torchgen.api import dispatcher from typing import List, Optional diff --git a/tools/codegen/api/lazy.py b/torchgen/api/lazy.py similarity index 99% rename from tools/codegen/api/lazy.py rename to torchgen/api/lazy.py index 1d98b36e646d63..0b4fcecc839da7 100644 --- a/tools/codegen/api/lazy.py +++ b/torchgen/api/lazy.py @@ -1,5 +1,5 @@ from typing import List, Union, Tuple, Optional -from tools.codegen.model import ( +from torchgen.model import ( Type, BaseTy, BaseType, @@ -11,7 +11,7 @@ TensorOptionsArguments, Argument, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( CType, BaseCppType, BaseCType, diff --git a/tools/codegen/api/meta.py b/torchgen/api/meta.py similarity index 89% rename from tools/codegen/api/meta.py rename to torchgen/api/meta.py index 9e8e4029428532..ad488d303d4632 100644 --- a/tools/codegen/api/meta.py +++ b/torchgen/api/meta.py @@ -1,4 +1,4 @@ -from tools.codegen.model import NativeFunctionsGroup +from torchgen.model import NativeFunctionsGroup # Follows dispatcher calling convention, but: # - Mutable arguments not allowed. 
Meta functions are always diff --git a/tools/codegen/api/native.py b/torchgen/api/native.py similarity index 96% rename from tools/codegen/api/native.py rename to torchgen/api/native.py index 991b1a4ce5c4bc..47610022e55a54 100644 --- a/tools/codegen/api/native.py +++ b/torchgen/api/native.py @@ -1,4 +1,4 @@ -from tools.codegen.model import ( +from torchgen.model import ( Argument, FunctionSchema, Return, @@ -7,7 +7,7 @@ Type, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( ArgName, BaseCType, Binding, @@ -24,9 +24,9 @@ boolT, scalarTypeT, ) -from tools.codegen.api import cpp -from tools.codegen import local -from tools.codegen.utils import assert_never +from torchgen.api import cpp +from torchgen import local +from torchgen.utils import assert_never from typing import Union, Sequence, List, Optional diff --git a/tools/codegen/api/python.py b/torchgen/api/python.py similarity index 99% rename from tools/codegen/api/python.py rename to torchgen/api/python.py index 4f8cadf1a47399..64ce1a9700f7de 100644 --- a/tools/codegen/api/python.py +++ b/torchgen/api/python.py @@ -1,10 +1,10 @@ from dataclasses import dataclass from typing import Optional, Union, Sequence, Set, List, Dict, Tuple -from tools.codegen.api.types import Binding, CppSignature, CppSignatureGroup -from tools.codegen.api import cpp -from tools.codegen.gen import pythonify_default -from tools.codegen.model import ( +from torchgen.api.types import Binding, CppSignature, CppSignatureGroup +from torchgen.api import cpp +from torchgen.gen import pythonify_default +from torchgen.model import ( Argument, BaseTy, BaseType, diff --git a/tools/codegen/api/structured.py b/torchgen/api/structured.py similarity index 95% rename from tools/codegen/api/structured.py rename to torchgen/api/structured.py index 2b6c7f2b50ccf0..4dff494a8a913b 100644 --- a/tools/codegen/api/structured.py +++ b/torchgen/api/structured.py @@ -1,4 +1,4 @@ -from tools.codegen.model import ( +from torchgen.model import ( Argument, BaseTy, BaseType, @@ -10,7 +10,7 @@ Type, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( ArgName, BaseCType, Binding, @@ -28,8 +28,8 @@ iTensorListRefT, ) -from tools.codegen.api import cpp -from tools.codegen.utils import assert_never +from torchgen.api import cpp +from torchgen.utils import assert_never from typing import Union, List @@ -65,7 +65,7 @@ def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType: elif isinstance(t, ListType): if t.elem == BaseType(BaseTy.Tensor): return NamedCType(binds, BaseCType(iTensorListRefT)) - # TODO: delete these special cases; see tools.codegen.api.cpp--these + # TODO: delete these special cases; see torchgen.api.cpp--these # must be changed in tandem, but there are problems; see # https://github.com/pytorch/pytorch/pull/51485 elif str(t.elem) == "int": diff --git a/tools/codegen/api/translate.py b/torchgen/api/translate.py similarity index 99% rename from tools/codegen/api/translate.py rename to torchgen/api/translate.py index 47aaeaed928708..12919ca4be221d 100644 --- a/tools/codegen/api/translate.py +++ b/torchgen/api/translate.py @@ -1,5 +1,5 @@ from typing import Dict, Sequence, List, NoReturn, Union -from tools.codegen.api.types import ( +from torchgen.api.types import ( tensorListT, BaseCType, Binding, @@ -204,7 +204,7 @@ def unsat(goal: NamedCType) -> NoReturn: {ctx_desc} -This probably means there is a missing rule in the rules of tools.codegen.api.translate. 
+This probably means there is a missing rule in the rules of torchgen.api.translate. Check this module for more information. """ ) diff --git a/tools/codegen/api/types.py b/torchgen/api/types.py similarity index 99% rename from tools/codegen/api/types.py rename to torchgen/api/types.py index 8487f6342372dd..35bb116c02d629 100644 --- a/tools/codegen/api/types.py +++ b/torchgen/api/types.py @@ -1,4 +1,4 @@ -from tools.codegen.model import ( +from torchgen.model import ( Argument, FunctionSchema, NativeFunction, @@ -327,7 +327,7 @@ def with_name(self, name: str) -> "NamedCType": # We don't distinguish between binding sites for different APIs; # instead, all of the important distinctions are encoded in CType, # which you can use to figure out if a given Binding is appropriate -# for use in another context. (See tools.codegen.api.translate) +# for use in another context. (See torchgen.api.translate) @dataclass(frozen=True) @@ -746,7 +746,7 @@ def kernel_signature( # Functions only, no types -from tools.codegen.api import ( +from torchgen.api import ( cpp, dispatcher, native, diff --git a/tools/codegen/api/ufunc.py b/torchgen/api/ufunc.py similarity index 97% rename from tools/codegen/api/ufunc.py rename to torchgen/api/ufunc.py index c8be0bc0d41680..5836e276240ee9 100644 --- a/tools/codegen/api/ufunc.py +++ b/torchgen/api/ufunc.py @@ -1,4 +1,4 @@ -from tools.codegen.model import ( +from torchgen.model import ( Argument, BaseTy, BaseType, @@ -8,8 +8,8 @@ DispatchKey, ) -import tools.codegen.api.types as api_types -from tools.codegen.api.types import ( +import torchgen.api.types as api_types +from torchgen.api.types import ( ArgName, BaseCType, Binding, @@ -20,7 +20,7 @@ BaseCppType, ) -from tools.codegen.api import cpp, structured +from torchgen.api import cpp, structured from dataclasses import dataclass from typing import List, Optional diff --git a/tools/codegen/api/unboxing.py b/torchgen/api/unboxing.py similarity index 98% rename from tools/codegen/api/unboxing.py rename to torchgen/api/unboxing.py index 29c2662d101279..06595353de291e 100644 --- a/tools/codegen/api/unboxing.py +++ b/torchgen/api/unboxing.py @@ -1,8 +1,8 @@ from typing import List, Tuple -from tools.codegen.api import cpp -from tools.codegen.api.types import Binding, CType, CppSignatureGroup -from tools.codegen.model import ( +from torchgen.api import cpp +from torchgen.api.types import Binding, CType, CppSignatureGroup +from torchgen.model import ( Argument, NativeFunction, Type, diff --git a/tools/codegen/build.bzl b/torchgen/build.bzl similarity index 86% rename from tools/codegen/build.bzl rename to torchgen/build.bzl index ed04e35a439133..d00078a3cfc1e1 100644 --- a/tools/codegen/build.bzl +++ b/torchgen/build.bzl @@ -1,6 +1,6 @@ def define_targets(rules): rules.py_library( - name = "codegen", + name = "torchgen", srcs = rules.glob(["**/*.py"]), deps = [ rules.requirement("PyYAML"), @@ -11,6 +11,6 @@ def define_targets(rules): rules.py_binary( name = "gen", - srcs = [":codegen"], + srcs = [":torchgen"], visibility = ["//visibility:public"], ) diff --git a/tools/codegen/code_template.py b/torchgen/code_template.py similarity index 100% rename from tools/codegen/code_template.py rename to torchgen/code_template.py diff --git a/tools/codegen/context.py b/torchgen/context.py similarity index 96% rename from tools/codegen/context.py rename to torchgen/context.py index af5f2aebae3343..ab0b90dcb73202 100644 --- a/tools/codegen/context.py +++ b/torchgen/context.py @@ -1,12 +1,12 @@ -from tools.codegen.utils import S, T, context 
-from tools.codegen.model import ( +from torchgen.utils import S, T, context +from torchgen.model import ( NativeFunction, NativeFunctionsGroup, NativeFunctionsViewGroup, BackendIndex, DispatchKey, ) -import tools.codegen.local as local +import torchgen.local as local import functools from typing import TypeVar, Union, Iterator, Callable, Dict, Optional diff --git a/tools/codegen/decompositions/gen_jit_decompositions.py b/torchgen/decompositions/gen_jit_decompositions.py similarity index 94% rename from tools/codegen/decompositions/gen_jit_decompositions.py rename to torchgen/decompositions/gen_jit_decompositions.py index e3ae8b116def44..7cfbb803f9b8bf 100644 --- a/tools/codegen/decompositions/gen_jit_decompositions.py +++ b/torchgen/decompositions/gen_jit_decompositions.py @@ -4,14 +4,14 @@ from torch.jit._decompositions import decomposition_table -# from tools.codegen.code_template import CodeTemplate +# from torchgen.code_template import CodeTemplate DECOMP_HEADER = r""" /** * @generated * This is an auto-generated file. Please do not modify it by hand. * To re-generate, please run: - * cd ~/pytorch && python tools/codegen/decompositions/gen_jit_decompositions.py + * cd ~/pytorch && python torchgen/decompositions/gen_jit_decompositions.py */ #include #include diff --git a/tools/codegen/dest/__init__.py b/torchgen/dest/__init__.py similarity index 100% rename from tools/codegen/dest/__init__.py rename to torchgen/dest/__init__.py diff --git a/tools/codegen/dest/lazy_ir.py b/torchgen/dest/lazy_ir.py similarity index 98% rename from tools/codegen/dest/lazy_ir.py rename to torchgen/dest/lazy_ir.py index f7611d0ec9b5ae..007c649df0a1f8 100644 --- a/tools/codegen/dest/lazy_ir.py +++ b/torchgen/dest/lazy_ir.py @@ -1,22 +1,22 @@ from abc import ABC from typing import List, Union from dataclasses import dataclass -from tools.codegen.context import method_with_native_function -from tools.codegen.model import BackendIndex, NativeFunction, NativeFunctionsGroup -from tools.codegen.api.types import ( +from torchgen.context import method_with_native_function +from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup +from torchgen.api.types import ( BaseCType, OptionalCType, VectorCType, kernel_signature, ) -import tools.codegen.api.dispatcher as dispatcher -from tools.codegen.api.lazy import ( +import torchgen.api.dispatcher as dispatcher +from torchgen.api.lazy import ( LazyIrSchema, LazyArgument, isValueType, tensorListValueT, ) -from tools.codegen.dest.lazy_ts_lowering import ts_lowering_body +from torchgen.dest.lazy_ts_lowering import ts_lowering_body def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str: diff --git a/tools/codegen/dest/lazy_ts_lowering.py b/torchgen/dest/lazy_ts_lowering.py similarity index 92% rename from tools/codegen/dest/lazy_ts_lowering.py rename to torchgen/dest/lazy_ts_lowering.py index 6c6e117ee2ca88..34470d776f66b9 100644 --- a/tools/codegen/dest/lazy_ts_lowering.py +++ b/torchgen/dest/lazy_ts_lowering.py @@ -1,7 +1,7 @@ from typing import Union -from tools.codegen.model import NativeFunction, NativeFunctionsGroup -from tools.codegen.api.lazy import LazyIrSchema -from tools.codegen.api.types import OptionalCType +from torchgen.model import NativeFunction, NativeFunctionsGroup +from torchgen.api.lazy import LazyIrSchema +from torchgen.api.types import OptionalCType def ts_lowering_body(f: Union[NativeFunctionsGroup, NativeFunction]) -> str: diff --git a/tools/codegen/dest/native_functions.py b/torchgen/dest/native_functions.py similarity index 85% 
rename from tools/codegen/dest/native_functions.py rename to torchgen/dest/native_functions.py index 3f7fc9093c53ad..67db9795f11ede 100644 --- a/tools/codegen/dest/native_functions.py +++ b/torchgen/dest/native_functions.py @@ -1,11 +1,11 @@ from typing import List, Union, Optional -from tools.codegen.context import with_native_function_and_index -from tools.codegen.utils import mapMaybe -from tools.codegen.model import NativeFunction, NativeFunctionsGroup, BackendIndex -from tools.codegen.api.types import kernel_signature -import tools.codegen.api.meta as meta -import tools.codegen.api.structured as structured +from torchgen.context import with_native_function_and_index +from torchgen.utils import mapMaybe +from torchgen.model import NativeFunction, NativeFunctionsGroup, BackendIndex +from torchgen.api.types import kernel_signature +import torchgen.api.meta as meta +import torchgen.api.structured as structured @with_native_function_and_index diff --git a/tools/codegen/dest/register_dispatch_key.py b/torchgen/dest/register_dispatch_key.py similarity index 98% rename from tools/codegen/dest/register_dispatch_key.py rename to torchgen/dest/register_dispatch_key.py index 1cef385f4bc72b..99626ebb664791 100644 --- a/tools/codegen/dest/register_dispatch_key.py +++ b/torchgen/dest/register_dispatch_key.py @@ -4,9 +4,9 @@ from dataclasses import dataclass import textwrap -from tools.codegen.context import method_with_native_function, native_function_manager -from tools.codegen.utils import Target, mapMaybe, assert_never -from tools.codegen.model import ( +from torchgen.context import method_with_native_function, native_function_manager +from torchgen.utils import Target, mapMaybe, assert_never +from torchgen.model import ( DispatchKey, NativeFunction, NativeFunctionsGroup, @@ -18,7 +18,7 @@ BackendIndex, gets_generated_out_inplace_wrapper, ) -from tools.codegen.api.types import ( +from torchgen.api.types import ( BaseCType, Binding, ConstRefCType, @@ -32,11 +32,11 @@ NamedCType, DispatcherSignature, ) -import tools.codegen.api.meta as meta -import tools.codegen.api.cpp as cpp -import tools.codegen.api.structured as structured -from tools.codegen.api.translate import translate -from tools.codegen.selective_build.selector import SelectiveBuilder +import torchgen.api.meta as meta +import torchgen.api.cpp as cpp +import torchgen.api.structured as structured +from torchgen.api.translate import translate +from torchgen.selective_build.selector import SelectiveBuilder def gen_registration_headers( diff --git a/tools/codegen/dest/ufunc.py b/torchgen/dest/ufunc.py similarity index 98% rename from tools/codegen/dest/ufunc.py rename to torchgen/dest/ufunc.py index fcfe45c02a94dd..4b81c4218f2ef5 100644 --- a/tools/codegen/dest/ufunc.py +++ b/torchgen/dest/ufunc.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import Union, Optional, List, Tuple, Dict, Sequence -from tools.codegen.api.translate import translate -from tools.codegen.model import ( +from torchgen.api.translate import translate +from torchgen.model import ( NativeFunctionsGroup, ScalarType, UfuncKey, @@ -10,9 +10,9 @@ BaseTy, Argument, ) -import tools.codegen.api.ufunc as ufunc -from tools.codegen.api.ufunc import UfunctorBindings -from tools.codegen.api.types import ( +import torchgen.api.ufunc as ufunc +from torchgen.api.ufunc import UfunctorBindings +from torchgen.api.types import ( StructuredImplSignature, scalar_t, opmath_t, @@ -24,7 +24,7 @@ ScalarTypeToCppMapping, VectorizedCType, ) -from tools.codegen.context import 
with_native_function
+from torchgen.context import with_native_function
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #
 #
diff --git a/tools/codegen/gen.py b/torchgen/gen.py
similarity index 99%
rename from tools/codegen/gen.py
rename to torchgen/gen.py
index 2de26b9a9ee5c0..4eb22bd91801eb 100644
--- a/tools/codegen/gen.py
+++ b/torchgen/gen.py
@@ -8,7 +8,7 @@
 import json
 from dataclasses import dataclass

-from tools.codegen.model import (
+from torchgen.model import (
     STRUCTURED_DISPATCH_KEYS,
     Argument,
     DispatchKey,
@@ -33,7 +33,7 @@
     BaseOperatorName,
     Tag,
 )
-from tools.codegen.api.types import (
+from torchgen.api.types import (
     Binding,
     CppSignatureGroup,
     DispatcherSignature,
@@ -41,15 +41,15 @@
     NativeSignature,
     SpecialArgName,
 )
-from tools.codegen.api import cpp
-import tools.codegen.api.dispatcher as dispatcher
-import tools.codegen.api.native as native
-import tools.codegen.api.meta as meta
-import tools.codegen.api.structured as structured
-from tools.codegen.api.translate import translate
-from tools.codegen.code_template import CodeTemplate
-from tools.codegen.selective_build.selector import SelectiveBuilder
-from tools.codegen.utils import (
+from torchgen.api import cpp
+import torchgen.api.dispatcher as dispatcher
+import torchgen.api.native as native
+import torchgen.api.meta as meta
+import torchgen.api.structured as structured
+from torchgen.api.translate import translate
+from torchgen.code_template import CodeTemplate
+from torchgen.selective_build.selector import SelectiveBuilder
+from torchgen.utils import (
     Target,
     concatMap,
     context,
@@ -60,14 +60,14 @@
     assert_never,
     make_file_manager,
 )
-from tools.codegen.context import (
+from torchgen.context import (
     method_with_native_function,
     native_function_manager,
     with_native_function_and_indices,
     with_native_function,
 )
-import tools.codegen.dest as dest
-from tools.codegen.gen_functionalization_type import (
+import torchgen.dest as dest
+from torchgen.gen_functionalization_type import (
     gen_functionalization_definition,
     gen_functionalization_registration,
     gen_functionalization_view_inverse_declaration,
@@ -2281,7 +2281,7 @@ def main() -> None:
 #include
 #include
 """
-    from tools.codegen.model import dispatch_keys
+    from torchgen.model import dispatch_keys

     # Only a limited set of dispatch keys get CPUFunctions.h headers generated
     # for them; this is the set
diff --git a/tools/codegen/gen_backend_stubs.py b/torchgen/gen_backend_stubs.py
similarity index 96%
rename from tools/codegen/gen_backend_stubs.py
rename to torchgen/gen_backend_stubs.py
index 46f1202df3e3b7..beee7a15e0db13 100644
--- a/tools/codegen/gen_backend_stubs.py
+++ b/torchgen/gen_backend_stubs.py
@@ -5,12 +5,12 @@
 import re
 from collections import namedtuple, Counter, defaultdict
 from typing import List, Dict, Union, Sequence, Optional
-from tools.codegen.gen import (
+from torchgen.gen import (
     get_grouped_native_functions,
     parse_native_yaml,
     NamespaceHelper,
 )
-from tools.codegen.model import (
+from torchgen.model import (
     BackendIndex,
     BackendMetadata,
     DispatchKey,
@@ -18,13 +18,13 @@
     NativeFunctionsGroup,
     OperatorName,
 )
-from tools.codegen.selective_build.selector import SelectiveBuilder
-from tools.codegen.utils import Target, concatMap, context, YamlLoader, FileManager
-from tools.codegen.context import native_function_manager
-from tools.codegen.code_template import CodeTemplate
-import tools.codegen.dest as dest
-import tools.codegen.api.dispatcher as dispatcher
-from tools.codegen.api.types import DispatcherSignature
+from torchgen.selective_build.selector import SelectiveBuilder
+from torchgen.utils import Target, concatMap, context, YamlLoader, FileManager
+from torchgen.context import native_function_manager
+from torchgen.code_template import CodeTemplate
+import torchgen.dest as dest
+import torchgen.api.dispatcher as dispatcher
+from torchgen.api.types import DispatcherSignature


 # Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
@@ -473,8 +473,8 @@ def run(
     source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str] = None
 ) -> None:

-    # Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
-    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
+    # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
+    pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
     template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

     def make_file_manager(install_dir: str) -> FileManager:
diff --git a/tools/codegen/gen_functionalization_type.py b/torchgen/gen_functionalization_type.py
similarity index 98%
rename from tools/codegen/gen_functionalization_type.py
rename to torchgen/gen_functionalization_type.py
index fa44447a82654b..9d806fdaeb4d6c 100644
--- a/tools/codegen/gen_functionalization_type.py
+++ b/torchgen/gen_functionalization_type.py
@@ -1,18 +1,18 @@
-from tools.codegen.api import cpp
-from tools.codegen.api.types import (
+from torchgen.api import cpp
+from torchgen.api.types import (
     DispatcherSignature,
     Binding,
     FunctionalizationLambda,
     ViewInverseSignature,
     NativeSignature,
 )
-from tools.codegen.api.translate import translate
-from tools.codegen.context import (
+from torchgen.api.translate import translate
+from torchgen.context import (
     with_native_function,
     with_native_function_and,
     native_function_manager,
 )
-from tools.codegen.model import (
+from torchgen.model import (
     Argument,
     NativeFunction,
     SchemaKind,
@@ -26,7 +26,7 @@
     NativeFunctionsViewGroup,
     ListType,
 )
-from tools.codegen.selective_build.selector import SelectiveBuilder
+from torchgen.selective_build.selector import SelectiveBuilder

 from typing import List, Optional, Union, Tuple

diff --git a/tools/codegen/gen_lazy_tensor.py b/torchgen/gen_lazy_tensor.py
similarity index 98%
rename from tools/codegen/gen_lazy_tensor.py
rename to torchgen/gen_lazy_tensor.py
index 5547c1c31e03c5..6d267961939bda 100644
--- a/tools/codegen/gen_lazy_tensor.py
+++ b/torchgen/gen_lazy_tensor.py
@@ -16,21 +16,21 @@
     Tuple,
     Type,
 )
-from tools.codegen.dest.lazy_ir import GenLazyIR, GenTSLazyIR
-from tools.codegen.gen import (
+from torchgen.dest.lazy_ir import GenLazyIR, GenTSLazyIR
+from torchgen.gen import (
     get_grouped_native_functions,
     parse_native_yaml,
     NamespaceHelper,
 )
-from tools.codegen.model import (
+from torchgen.model import (
     FunctionSchema,
     NativeFunction,
     NativeFunctionsGroup,
     OperatorName,
 )
-from tools.codegen.selective_build.selector import SelectiveBuilder
-from tools.codegen.utils import concatMap, YamlLoader, FileManager
-import tools.codegen.dest as dest
+from torchgen.selective_build.selector import SelectiveBuilder
+from torchgen.utils import concatMap, YamlLoader, FileManager
+import torchgen.dest as dest
 from .gen_backend_stubs import (
     parse_backend_yaml,
     error_on_missing_kernels,
@@ -225,7 +225,7 @@ def main() -> None:
     )
     options = parser.parse_args()

-    # Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
+    # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
     torch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
     aten_path = str(torch_root / "aten" / "src" / "ATen")
     lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator
diff --git a/tools/codegen/local.py b/torchgen/local.py
similarity index 100%
rename from tools/codegen/local.py
rename to torchgen/local.py
diff --git a/tools/codegen/model.py b/torchgen/model.py
similarity index 99%
rename from tools/codegen/model.py
rename to torchgen/model.py
index 489842daa62956..7cc91cd543ce7e 100644
--- a/tools/codegen/model.py
+++ b/torchgen/model.py
@@ -1,6 +1,6 @@
 import re

-from tools.codegen.utils import assert_never
+from torchgen.utils import assert_never

 from dataclasses import dataclass
 from typing import List, Dict, Optional, Iterator, Tuple, Set, Sequence, Callable, Union
@@ -258,7 +258,7 @@ def parse_set(values: str) -> Set["ScalarType"]:


 # Represents the valid entries for ufunc_inner_loop in native_functions.yaml.
-# NB: if you add a new UfuncKey, you will teach tools.codegen.dest.ufunc how
+# NB: if you add a new UfuncKey, you will teach torchgen.dest.ufunc how
 # to process it. Most logic will ignore keys they don't understand, so your
 # new key will get silently ignored until you hook in logic to deal with it.
 class UfuncKey(Enum):
@@ -518,7 +518,7 @@ def from_yaml(
         assert tag_str is None or isinstance(tag_str, str), f"not a str: {tag_str}"
         tag = Tag.parse(tag_str) if tag_str else None

-        from tools.codegen.api import cpp
+        from torchgen.api import cpp

         raw_dispatch = e.pop("dispatch", None)
         assert raw_dispatch is None or isinstance(raw_dispatch, dict), e
@@ -2132,4 +2132,4 @@ def to_list(self) -> List[str]:
         return replace_list


-import tools.codegen.api.ufunc as ufunc
+import torchgen.api.ufunc as ufunc
diff --git a/tools/codegen/operator_versions/__init__.py b/torchgen/operator_versions/__init__.py
similarity index 100%
rename from tools/codegen/operator_versions/__init__.py
rename to torchgen/operator_versions/__init__.py
diff --git a/tools/codegen/operator_versions/gen_mobile_upgraders.py b/torchgen/operator_versions/gen_mobile_upgraders.py
similarity index 98%
rename from tools/codegen/operator_versions/gen_mobile_upgraders.py
rename to torchgen/operator_versions/gen_mobile_upgraders.py
index 38b3eb421922c2..54c5b3a5628afa 100644
--- a/tools/codegen/operator_versions/gen_mobile_upgraders.py
+++ b/torchgen/operator_versions/gen_mobile_upgraders.py
@@ -5,9 +5,9 @@
 from typing import Any, Dict, List

 import torch
-from tools.codegen.code_template import CodeTemplate
+from torchgen.code_template import CodeTemplate
 from torch.jit.generate_bytecode import generate_upgraders_bytecode
-from tools.codegen.operator_versions.gen_mobile_upgraders_constant import (
+from torchgen.operator_versions.gen_mobile_upgraders_constant import (
     MOBILE_UPGRADERS_HEADER_DESCRIPTION,
 )

diff --git a/tools/codegen/operator_versions/gen_mobile_upgraders_constant.py b/torchgen/operator_versions/gen_mobile_upgraders_constant.py
similarity index 66%
rename from tools/codegen/operator_versions/gen_mobile_upgraders_constant.py
rename to torchgen/operator_versions/gen_mobile_upgraders_constant.py
index f83e5d1f4c943b..04b5ad887e5415 100644
--- a/tools/codegen/operator_versions/gen_mobile_upgraders_constant.py
+++ b/torchgen/operator_versions/gen_mobile_upgraders_constant.py
@@ -2,6 +2,6 @@
  * @generated
  * This is an auto-generated file. Please do not modify it by hand.
  * To re-generate, please run:
- * cd ~/pytorch && python tools/codegen/operator_versions/gen_mobile_upgraders.py
+ * cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
 */
 """
diff --git a/tools/codegen/selective_build/__init__.py b/torchgen/selective_build/__init__.py
similarity index 100%
rename from tools/codegen/selective_build/__init__.py
rename to torchgen/selective_build/__init__.py
diff --git a/tools/codegen/selective_build/operator.py b/torchgen/selective_build/operator.py
similarity index 100%
rename from tools/codegen/selective_build/operator.py
rename to torchgen/selective_build/operator.py
diff --git a/tools/codegen/selective_build/selector.py b/torchgen/selective_build/selector.py
similarity index 99%
rename from tools/codegen/selective_build/selector.py
rename to torchgen/selective_build/selector.py
index ffe557a17aef16..e65ecf5eaf452b 100644
--- a/tools/codegen/selective_build/selector.py
+++ b/torchgen/selective_build/selector.py
@@ -3,8 +3,8 @@

 from dataclasses import dataclass

-from tools.codegen.model import NativeFunction
-from tools.codegen.selective_build.operator import (
+from torchgen.model import NativeFunction
+from torchgen.selective_build.operator import (
     SelectiveBuildOperator,
     merge_debug_info,
     merge_operator_dicts,
diff --git a/tools/codegen/shape_functions/gen_jit_shape_functions.py b/torchgen/shape_functions/gen_jit_shape_functions.py
similarity index 98%
rename from tools/codegen/shape_functions/gen_jit_shape_functions.py
rename to torchgen/shape_functions/gen_jit_shape_functions.py
index b8be57848b3749..cd4384360045e1 100644
--- a/tools/codegen/shape_functions/gen_jit_shape_functions.py
+++ b/torchgen/shape_functions/gen_jit_shape_functions.py
@@ -10,7 +10,7 @@
  * This is an auto-generated file. Please do not modify it by hand.
  * To re-generate, please run:
  * cd ~/pytorch && python
- * tools/codegen/shape_functions/gen_jit_shape_functions.py
+ * torchgen/shape_functions/gen_jit_shape_functions.py
 */
 #include
 #include
diff --git a/tools/codegen/static_runtime/__init__.py b/torchgen/static_runtime/__init__.py
similarity index 100%
rename from tools/codegen/static_runtime/__init__.py
rename to torchgen/static_runtime/__init__.py
diff --git a/tools/codegen/static_runtime/config.py b/torchgen/static_runtime/config.py
similarity index 99%
rename from tools/codegen/static_runtime/config.py
rename to torchgen/static_runtime/config.py
index cf78733d6a6d12..ed7d2a6cf7a123 100644
--- a/tools/codegen/static_runtime/config.py
+++ b/torchgen/static_runtime/config.py
@@ -1,4 +1,4 @@
-from tools.codegen.model import NativeFunctionsGroup
+from torchgen.model import NativeFunctionsGroup

 from typing import Dict

diff --git a/tools/codegen/static_runtime/gen_static_runtime_ops.py b/torchgen/static_runtime/gen_static_runtime_ops.py
similarity index 96%
rename from tools/codegen/static_runtime/gen_static_runtime_ops.py
rename to torchgen/static_runtime/gen_static_runtime_ops.py
index 8812bc7ba4ae0e..279612e4392dcc 100644
--- a/tools/codegen/static_runtime/gen_static_runtime_ops.py
+++ b/torchgen/static_runtime/gen_static_runtime_ops.py
@@ -1,7 +1,7 @@
-from tools.codegen import gen
-from tools.codegen.context import native_function_manager
-from tools.codegen.model import NativeFunctionsGroup
-from tools.codegen.static_runtime import gen_structured
+from torchgen import gen
+from torchgen.context import native_function_manager
+from torchgen.model import NativeFunctionsGroup
+from torchgen.static_runtime import gen_structured

 import argparse
 import itertools
diff --git a/tools/codegen/static_runtime/gen_structured.py b/torchgen/static_runtime/gen_structured.py
similarity index 98%
rename from tools/codegen/static_runtime/gen_structured.py
rename to torchgen/static_runtime/gen_structured.py
index ffd13e0b5ef60c..3bde94d5749d2f 100644
--- a/tools/codegen/static_runtime/gen_structured.py
+++ b/torchgen/static_runtime/gen_structured.py
@@ -1,6 +1,6 @@
-import tools.codegen.api.cpp as cpp
-from tools.codegen.context import native_function_manager
-from tools.codegen.model import (
+import torchgen.api.cpp as cpp
+from torchgen.context import native_function_manager
+from torchgen.model import (
     Argument,
     BaseTy,
     FunctionSchema,
@@ -11,7 +11,7 @@
     TensorOptionsArguments,
     Type,
 )
-from tools.codegen.static_runtime import config
+from torchgen.static_runtime import config

 import math
 from typing import List, Optional, Sequence, Tuple, Union
diff --git a/tools/codegen/utils.py b/torchgen/utils.py
similarity index 98%
rename from tools/codegen/utils.py
rename to torchgen/utils.py
index aa53977d697e8c..4819f4950ae0f6 100644
--- a/tools/codegen/utils.py
+++ b/torchgen/utils.py
@@ -22,7 +22,7 @@
 )
 from enum import Enum

-from tools.codegen.code_template import CodeTemplate
+from torchgen.code_template import CodeTemplate

 # Safely load fast C Yaml loader/dumper if they are available
 try:
@@ -185,7 +185,7 @@ def write_with_template(
         if isinstance(env, dict):
             # TODO: Update the comment reference to the correct location
             if "generated_comment" not in env:
-                comment = "@" + "generated by tools/codegen/gen.py"
+                comment = "@" + "generated by torchgen/gen.py"
                 comment += " from {}".format(os.path.basename(template_fn))
                 env["generated_comment"] = comment
         template = _read_template(os.path.join(self.template_dir, template_fn))