# generate_code.py
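"""Code-generation entry point used by the PyTorch build.

Drives the autograd and annotated-function generators in tools/autograd and,
optionally, the torch::lazy TorchScript backend generator. A representative
invocation (illustrative; run from the repository root):

    python generate_code.py \
        --gen-dir build/generated \
        --native-functions-path aten/src/ATen/native/native_functions.yaml \
        --tags-path aten/src/ATen/native/tags.yaml
"""
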
import argparse
import os
import pathlib
import sys
from typing import Any, cast, Optional

import yaml

try:
# use faster C loader if available
from yaml import CSafeLoader as YamlLoader
except ImportError:
    from yaml import SafeLoader as YamlLoader  # type: ignore[assignment, misc]

NATIVE_FUNCTIONS_PATH = "aten/src/ATen/native/native_functions.yaml"
TAGS_PATH = "aten/src/ATen/native/tags.yaml"


def generate_code(
gen_dir: pathlib.Path,
native_functions_path: Optional[str] = None,
tags_path: Optional[str] = None,
install_dir: Optional[str] = None,
subset: Optional[str] = None,
disable_autograd: bool = False,
force_schema_registration: bool = False,
operator_selector: Any = None,
) -> None:
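    """Generate the ATen-based Variable/autograd sources.

    C++ output goes to ``<install_dir>/autograd/generated`` and the annotated
    Python helpers to the generated testing directory. ``subset`` limits
    generation to "pybindings", "libtorch", or "python"; when None, all three
    are generated. ``force_schema_registration`` is accepted for CLI
    compatibility but is not consulted in this function.
    """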
from torchgen.selective_build.selector import SelectiveBuilder
from tools.autograd.gen_annotated_fn_args import gen_annotated
from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
# Build ATen based Variable classes
if install_dir is None:
install_dir = os.fspath(gen_dir / "torch/csrc")
python_install_dir = os.fspath(gen_dir / "torch/testing/_internal/generated")
else:
python_install_dir = install_dir
autograd_gen_dir = os.path.join(install_dir, "autograd", "generated")
for d in (autograd_gen_dir, python_install_dir):
os.makedirs(d, exist_ok=True)
autograd_dir = os.fspath(pathlib.Path(__file__).parent.parent / "autograd")
if subset == "pybindings" or not subset:
gen_autograd_python(
native_functions_path or NATIVE_FUNCTIONS_PATH,
tags_path or TAGS_PATH,
autograd_gen_dir,
autograd_dir,
)
if operator_selector is None:
operator_selector = SelectiveBuilder.get_nop_selector()
if subset == "libtorch" or not subset:
gen_autograd(
native_functions_path or NATIVE_FUNCTIONS_PATH,
tags_path or TAGS_PATH,
autograd_gen_dir,
autograd_dir,
disable_autograd=disable_autograd,
operator_selector=operator_selector,
)
if subset == "python" or not subset:
gen_annotated(
native_functions_path or NATIVE_FUNCTIONS_PATH,
tags_path or TAGS_PATH,
python_install_dir,
autograd_dir,
)
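

# Example (illustrative, not executed by the build): generate only the Python
# bindings into a scratch directory with the default no-op operator selector:
#
#   generate_code(pathlib.Path("build/generated"), subset="pybindings")
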
def get_selector_from_legacy_operator_selection_list(
selected_op_list_path: str,
) -> Any:
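    """Build a SelectiveBuilder from a legacy operator allow-list YAML file."""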
with open(selected_op_list_path) as f:
# strip out the overload part
# It's only for legacy config - do NOT copy this code!
selected_op_list = {
opname.split(".", 1)[0] for opname in yaml.load(f, Loader=YamlLoader)
}
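        # For example, a legacy entry such as "aten::add.Tensor" is reduced to
        # "aten::add", since the legacy config is keyed on base names only.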
# Internal build doesn't use this flag any more. Only used by OSS
# build now. Every operator should be considered a root operator
# (hence generating unboxing code for it, which is consistent with
# the current behavior), and also be considered as used for
# training, since OSS doesn't support training on mobile for now.
#
is_root_operator = True
is_used_for_training = True
from torchgen.selective_build.selector import SelectiveBuilder
selector = SelectiveBuilder.from_legacy_op_registration_allow_list(
selected_op_list,
is_root_operator,
is_used_for_training,
)
    return selector


def get_selector(
selected_op_list_path: Optional[str],
operators_yaml_path: Optional[str],
) -> Any:
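    """Return the operator selector implied by the command-line flags.

    At most one of ``selected_op_list_path`` and ``operators_yaml_path`` may
    be set; when neither is set, the no-op selector is returned.
    """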
    # Make the repository root importable so torchgen can be found.
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
from torchgen.selective_build.selector import SelectiveBuilder
assert not (
selected_op_list_path is not None and operators_yaml_path is not None
), (
"Expected at most one of selected_op_list_path and "
+ "operators_yaml_path to be set."
)
if selected_op_list_path is None and operators_yaml_path is None:
return SelectiveBuilder.get_nop_selector()
elif selected_op_list_path is not None:
return get_selector_from_legacy_operator_selection_list(selected_op_list_path)
else:
return SelectiveBuilder.from_yaml_path(cast(str, operators_yaml_path))
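

# Note: with neither op-list flag set, get_selector(None, None) returns the
# no-op selector, which places no restriction on the generated operators.
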
def main() -> None:
parser = argparse.ArgumentParser(description="Autogenerate code")
parser.add_argument("--native-functions-path")
parser.add_argument("--tags-path")
parser.add_argument(
"--gen-dir",
type=pathlib.Path,
default=pathlib.Path("."),
help="Root directory where to install files. Defaults to the current working directory.",
)
parser.add_argument(
"--install-dir",
"--install_dir",
help=(
"Deprecated. Use --gen-dir instead. The semantics are different, do not change "
"blindly."
),
)
parser.add_argument(
"--subset",
        help='Subset of source files to generate. Can be "libtorch", "pybindings", or "python". Generates all three when omitted.',
)
parser.add_argument(
"--disable-autograd",
default=False,
action="store_true",
help="It can skip generating autograd related code when the flag is set",
)
parser.add_argument(
"--selected-op-list-path",
help="Path to the YAML file that contains the list of operators to include for custom build.",
)
parser.add_argument(
"--operators-yaml-path",
"--operators_yaml_path",
help="Path to the model YAML file that contains the list of operators to include for custom build.",
)
parser.add_argument(
"--force-schema-registration",
"--force_schema_registration",
action="store_true",
help="force it to generate schema-only registrations for ops that are not"
"listed on --selected-op-list",
)
parser.add_argument(
"--gen-lazy-ts-backend",
"--gen_lazy_ts_backend",
action="store_true",
help="Enable generation of the torch::lazy TorchScript backend",
)
parser.add_argument(
"--per-operator-headers",
"--per_operator_headers",
action="store_true",
help="Build lazy tensor ts backend with per-operator ATen headers, must match how ATen was built",
)
options = parser.parse_args()
generate_code(
options.gen_dir,
options.native_functions_path,
options.tags_path,
options.install_dir,
options.subset,
options.disable_autograd,
options.force_schema_registration,
        # Build the operator selector from whichever op-list flag was set.
operator_selector=get_selector(
options.selected_op_list_path, options.operators_yaml_path
),
)
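
    # Optionally generate the torch::lazy TorchScript backend: reads the TS
    # backend YAML that sits next to native_functions.yaml and emits the
    # generated sources into <install_dir>/lazy/generated.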
if options.gen_lazy_ts_backend:
aten_path = os.path.dirname(os.path.dirname(options.native_functions_path))
ts_backend_yaml = os.path.join(aten_path, "native/ts_native_functions.yaml")
ts_native_functions = "torch/csrc/lazy/ts_backend/ts_native_functions.cpp"
ts_node_base = "torch/csrc/lazy/ts_backend/ts_node.h"
install_dir = options.install_dir or os.fspath(options.gen_dir / "torch/csrc")
lazy_install_dir = os.path.join(install_dir, "lazy/generated")
os.makedirs(lazy_install_dir, exist_ok=True)
assert os.path.isfile(
ts_backend_yaml
), f"Unable to access ts_backend_yaml: {ts_backend_yaml}"
assert os.path.isfile(
ts_native_functions
), f"Unable to access {ts_native_functions}"
from torchgen.dest.lazy_ir import GenTSLazyIR
from torchgen.gen_lazy_tensor import run_gen_lazy_tensor
run_gen_lazy_tensor(
aten_path=aten_path,
source_yaml=ts_backend_yaml,
backend_name="TorchScript",
output_dir=lazy_install_dir,
dry_run=False,
impl_path=ts_native_functions,
node_base="TsNode",
node_base_hdr=ts_node_base,
build_in_tree=True,
lazy_ir_generator=GenTSLazyIR,
per_operator_headers=options.per_operator_headers,
gen_forced_fallback_code=True,
        )


if __name__ == "__main__":
main()