1load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "get_default_executorch_platforms", "is_xplat", "runtime", "struct_to_json") 2load("@fbsource//xplat/executorch/build:selects.bzl", "selects") 3load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "portable_header_list", "portable_source_list") 4 5# Headers that declare the function signatures of the C++ functions that 6# map to entries in functions.yaml and custom_ops.yaml. 7OPERATOR_HEADERS = [ 8 # buildifier: keep sorted 9 "Functions.h", 10 "NativeFunctions.h", 11] 12 13STATIC_DISPATCH_BACKENDS = [ 14 "CPU", 15] 16 17# In ATen enabled mode, Functions.h will call into ATen/CPUFunctions.h and 18# other ATen generated code. Hence we don't need to include the code generated 19# by executorch codegen. 20GENERATED_SOURCES = [ 21 # buildifier: keep sorted 22 "RegisterCodegenUnboxedKernelsEverything.cpp", 23] 24 25MANUAL_REGISTRATION_SOURCES = [ 26 # buildifier: keep sorted 27 "RegisterKernelsEverything.cpp", 28] 29 30MANUAL_REGISTRATION_HEADERS = [ 31 "RegisterKernels.h", 32] 33 34# Fake kernels only return `out` or any other tensor from arguments 35CUSTOM_OPS_DUMMY_KERNEL_SOURCES = ["Register{}Stub.cpp".format(backend) for backend in STATIC_DISPATCH_BACKENDS] 36 37CUSTOM_OPS_GENERATED_SOURCES = ["Register{}CustomOps.cpp".format(backend) for backend in STATIC_DISPATCH_BACKENDS] 38 39CUSTOM_OPS_NATIVE_FUNCTION_HEADER = ["CustomOpsNativeFunctions.h"] 40 41CUSTOM_OPS_SCHEMA_REGISTRATION_SOURCES = [ 42 "RegisterSchema.cpp", 43] 44 45# Hide the dependency to caffe2 internally. 46def et_operator_library( 47 name, 48 ops = [], 49 ops_dict = {}, 50 model = None, 51 include_all_operators = False, 52 ops_schema_yaml_target = None, 53 server_generated_yaml_target = None, 54 **kwargs): 55 # do a dummy copy if server_generated_yaml_target is set 56 if server_generated_yaml_target: 57 if include_all_operators or ops_schema_yaml_target or model or ops or ops_dict: 58 fail("Since server_generated_yaml_target is set, ops, ops_dict, include_all_operators and ops_schema_yaml_target shouldn't be set.") 59 genrule_cmd = [ 60 "cp", 61 "$(location {})".format(server_generated_yaml_target), 62 "$OUT", 63 ] 64 else: 65 genrule_cmd = [ 66 "$(exe //executorch/codegen/tools:gen_oplist)", 67 "--output_path=${OUT}", 68 ] 69 if ops_schema_yaml_target: 70 genrule_cmd.append( 71 "--ops_schema_yaml_path=$(location {})".format(ops_schema_yaml_target), 72 ) 73 if ops: 74 genrule_cmd.append( 75 "--root_ops=" + ",".join(ops), 76 ) 77 if ops_dict: 78 ops_dict_json = struct_to_json(ops_dict) 79 genrule_cmd.append( 80 "--ops_dict='{}'".format(ops_dict_json), 81 ) 82 if model: 83 genrule_cmd.append( 84 "--model_file_path=$(location {})".format(model), 85 ) 86 if include_all_operators: 87 genrule_cmd.append( 88 "--include_all_operators", 89 ) 90 91 # TODO(larryliu0820): Remove usages of this flag. 

def _get_headers(genrule_name, prefix = "", custom_op = None, manual_registration = False):
    headers = OPERATOR_HEADERS + (CUSTOM_OPS_NATIVE_FUNCTION_HEADER if custom_op else [])
    return {
        prefix + f: ":{}[{}]".format(genrule_name, f)
        for f in (MANUAL_REGISTRATION_HEADERS if manual_registration else [])
    }, {
        prefix + f: ":{}[{}]".format(genrule_name, f)
        for f in headers
    }
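
# For illustration (hypothetical genrule name), _get_headers(genrule_name =
# "mylib_combined", manual_registration = True) returns the pair:
#     ({"RegisterKernels.h": ":mylib_combined[RegisterKernels.h]"},
#      {"Functions.h": ":mylib_combined[Functions.h]",
#       "NativeFunctions.h": ":mylib_combined[NativeFunctions.h]"})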

def _prepare_genrule_and_lib(
        name,
        functions_yaml_path = None,
        custom_ops_yaml_path = None,
        custom_ops_requires_runtime_registration = True,
        manual_registration = False,
        aten_mode = False):
    """
    This function returns two dicts, `genrules` and `libs`, derived from the arguments passed
    to `executorch_generated_lib`. `genrules` contains all information about which genrules to
    run. Its structure looks like this:
    {
        <genrule_name_1>: {
            "cmd": <genrule_cmd_1>,
            "outs": <outs_list_1>,
        },
        <genrule_name_2>: {
            "cmd": <genrule_cmd_2>,
            "outs": <outs_list_2>,
        },
    }
    Similarly, `libs` contains information about which cxx_library targets we will generate.
    Its structure looks like:
    {
        <lib_name_1>: {
            "genrule": <genrule_1>, # where to find the source files
            "srcs": <srcs_1>, # the source file names
        },
        <lib_name_2>: {
            "genrule": <genrule_2>, # where to find the source files
            "srcs": <srcs_2>, # the source file names
        },
    }
    """
    target = runtime.external_dep_location("gen-executorch")
    aten_src_path = runtime.external_dep_location("aten-src-path")
    genrule_cmd = [
        "$(exe {})".format(target),
        "--source-path=$(location //executorch/codegen:templates)",
        "--tags-path $(location {})/aten/src/ATen/native/tags.yaml".format(aten_src_path),
        "--aten_yaml_path $(location {})/aten/src/ATen/native/native_functions.yaml".format(aten_src_path),
        "--install_dir=${OUT}",
        # TODO(dbort): Add a second step that verifies that the set of
        # actually-generated files matches GENERATED_FILES.
    ]

    # Sources for the generated kernel registration lib
    sources = MANUAL_REGISTRATION_SOURCES if manual_registration else GENERATED_SOURCES

    # The command will always generate these files.
    genrule_outs = sources + OPERATOR_HEADERS + (CUSTOM_OPS_NATIVE_FUNCTION_HEADER if custom_ops_yaml_path else []) + MANUAL_REGISTRATION_HEADERS

    genrules = {}
    libs = {}

    # if aten_mode is true, we don't need functions_yaml_path
    genrule_name = name + "_combined"
    exported_headers, headers = _get_headers(genrule_name = genrule_name, custom_op = custom_ops_yaml_path, manual_registration = manual_registration)

    # need to register ATen ops into the Executorch runtime:
    need_reg_aten_ops = aten_mode or functions_yaml_path

    # need to register custom ops into the Executorch runtime:
    need_reg_custom_ops = custom_ops_yaml_path and custom_ops_requires_runtime_registration

    need_reg_ops = need_reg_aten_ops or need_reg_custom_ops

    if need_reg_aten_ops:
        path = (
            "$(location {})/aten/src/ATen/native/native_functions.yaml".format(aten_src_path)
        ) if not functions_yaml_path else functions_yaml_path
        genrule_cmd = genrule_cmd + [
            "--functions_yaml_path={}".format(path),
        ]
    if aten_mode:
        genrule_cmd = genrule_cmd + ["--use_aten_lib"]
    if manual_registration:
        genrule_cmd = genrule_cmd + [
            "--manual_registration",
        ]
    if custom_ops_yaml_path:
        genrule_cmd = genrule_cmd + [
            "--custom_ops_yaml_path=" + custom_ops_yaml_path,
        ]
    genrules[genrule_name] = {
        "cmd": genrule_cmd,
        "outs": genrule_outs,
    }

    if need_reg_ops:
        libs[name] = {
            "exported_headers": exported_headers,
            "genrule": genrule_name,
            "headers": headers,
            "srcs": sources,
        }

    header_lib = name + "_headers"
    libs[header_lib] = {
        "exported_headers": exported_headers,
        "headers": headers,
    }
    return genrules, libs
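
# For illustration (hypothetical name), _prepare_genrule_and_lib(name = "mylib",
# functions_yaml_path = "<path>") yields one genrule entry keyed "mylib_combined"
# and two lib entries, "mylib" and "mylib_headers", matching the structures
# described in the docstring above.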

def _prepare_custom_ops_genrule_and_lib(
        name,
        custom_ops_yaml_path = None,
        deps = [],
        kernels = []):
    """Similar to _prepare_genrule_and_lib but for custom ops."""
    genrules = {}
    libs = {}
    aten_src_path = runtime.external_dep_location("aten-src-path")
    target = runtime.external_dep_location("gen-executorch")
    genrule_name = name + "_gen"

    if custom_ops_yaml_path:
        # genrule for selective build from static operator list
        oplist_dir_name = name + "_oplist"
        runtime.genrule(
            name = oplist_dir_name,
            macros_only = False,
            cmd = ("$(exe fbsource//xplat/executorch/codegen/tools:gen_all_oplist) " +
                   "--model_file_list_path $(@query_outputs 'attrfilter(labels, et_operator_library, deps(set({deps})))') " +
                   "--allow_include_all_overloads " +
                   "--output_dir $OUT ").format(deps = " ".join(["\"{}\"".format(d) for d in deps])),
            outs = {"selected_operators.yaml": ["selected_operators.yaml"]},
            default_outs = ["."],
        )

        # genrule for generating operator kernel bindings
        genrule_cmd = [
            "$(exe {})".format(target),
            "--source-path=$(location //executorch/codegen:templates)",
            "--tags-path $(location {})/aten/src/ATen/native/tags.yaml".format(aten_src_path),
            "--aten_yaml_path $(location {})/aten/src/ATen/native/native_functions.yaml".format(aten_src_path),
            "--custom_ops_yaml_path=" + custom_ops_yaml_path,
            "--install_dir=${OUT}",
            "--op_selection_yaml_path=$(location :{}[selected_operators.yaml])".format(oplist_dir_name),
        ]

        # Determine what sources the custom_ops_<name> target should include
        custom_ops_sources = CUSTOM_OPS_SCHEMA_REGISTRATION_SOURCES + (
            CUSTOM_OPS_GENERATED_SOURCES if kernels else CUSTOM_OPS_DUMMY_KERNEL_SOURCES
        )

        # lib for registering custom ops to pytorch
        libs[name] = {
            "genrule": genrule_name,
            "headers": [],
            "srcs": custom_ops_sources,
        }
        my_cmd = ""
        for rule_substr in genrule_cmd:
            if my_cmd != "":
                my_cmd += " "
            my_cmd += rule_substr
        genrules[genrule_name] = {
            "cmd": my_cmd,
            "outs": {out: [out] for out in CUSTOM_OPS_NATIVE_FUNCTION_HEADER + custom_ops_sources},
        }
    return genrules, libs

def exir_custom_ops_aot_lib(
        name,
        yaml_target = None,
        visibility = [],
        kernels = [],
        deps = [],
        compiler_flags = [],
        define_static_target = False,
        platforms = get_default_executorch_platforms()):
    """Generates a C++ library that helps register the custom ops into PyTorch,
    so they are visible to EXIR. To use this, we need to load the generated .so file:
    ```python
    torch.ops.load_library(...)
    ```

    Args:
        name: we recommend a name that makes it obvious to the user that this
            library should only be used by EXIR (AOT), not by the executorch runtime.
        yaml_target: buck target for the yaml file with proper schema and kernel entry.
            See https://github.com/pytorch/executorch/blob/main/kernels/portable/README.md#yaml-schema
            for the schema syntax.
        visibility: visibility of the generated library.
        kernels: C++ kernels for these custom ops. They need to be implemented using ATen/c10 basics.
        deps: dependencies of the generated library.
    """
    genrules, libs = _prepare_custom_ops_genrule_and_lib(
        name = name,
        custom_ops_yaml_path = selects.apply(yaml_target, lambda y: "$(location {})".format(y)),
        kernels = kernels,
        deps = deps,
    )
    for genrule in genrules:
        runtime.genrule(
            name = genrule,
            macros_only = False,
            cmd = genrules[genrule]["cmd"],
            outs = genrules[genrule]["outs"],
            default_outs = ["."],
        )
    for compiler_lib in libs:
        runtime.cxx_library(
            name = compiler_lib,
            srcs = [
                ":{}[{}]".format(libs[compiler_lib]["genrule"], f)
                for f in libs[compiler_lib]["srcs"]
            ],
            headers = {
                "CustomOpsNativeFunctions.h": ":{}[CustomOpsNativeFunctions.h]".format(libs[compiler_lib]["genrule"]),
            },
            # link_whole is necessary because the operators register themselves
            # via static initializers that run at program startup.
            # @lint-ignore BUCKLINT link_whole
            link_whole = True,
            visibility = visibility,
            deps = kernels + deps,
            external_deps = ["libtorch"],
            define_static_target = define_static_target,
            # Relax visibility restrictions since deps may include targets
            # outside of //executorch.
            _is_external_target = True,
            # Explicitly indicate that this C++ library will be loaded by Python
            # and consequently needs to be exposed as a shared library. It's not
            # required, but when set it'll make builds faster.
            supports_python_dlopen = True,
            platforms = platforms,
            compiler_flags = compiler_flags,
            force_static = False,
        )
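
# A minimal usage sketch of exir_custom_ops_aot_lib (hypothetical targets, for
# illustration only). The resulting .so can be loaded with
# torch.ops.load_library() to make the ops visible to EXIR:
#
#     exir_custom_ops_aot_lib(
#         name = "custom_ops_aot_lib",
#         yaml_target = ":custom_ops.yaml",
#         kernels = [":custom_ops_aten_kernels"],
#     )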

# Used for dtype selective build. Genrules to copy source and header files.
def portable_outs(target_name, file_list):
    outs = {}
    for file in file_list:
        outs[file] = ["{}/{}".format(target_name, file)]
    return outs

def copy_portable_source_files(name):
    target_name = "portable_source_files"
    runtime.genrule(
        name = name,
        cmd = "cp -f -r $(location //executorch/kernels/portable/cpu:{}) $OUT/".format(target_name),
        outs = portable_outs(target_name, portable_source_list()),
        default_outs = ["."],
    )

def copy_portable_header_files(name):
    target_name = "portable_header_files"
    runtime.genrule(
        name = name,
        cmd = "cp -f -r $(location //executorch/kernels/portable/cpu:{}) $OUT/".format(target_name),
        outs = portable_outs(target_name, portable_header_list()),
        default_outs = ["."],
    )
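
# For illustration, portable_outs("portable_source_files", ["op_add.cpp"])
# returns {"op_add.cpp": ["portable_source_files/op_add.cpp"]}, mapping each
# named output to its path under the genrule's $OUT directory.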

def build_portable_lib(name, oplist_header_name, feature = None):
    """Build the portable lib from source. We build from source so that the
    generated header file, selected_op_variants.h, can be used to selectively
    build the lib for different dtypes.
    """

    # Copy portable cpp files.
    portable_source_files = []
    copy_portable_source_files_genrule = name + "_copy_portable_source"
    copy_portable_source_files(copy_portable_source_files_genrule)
    for op in portable_source_list():
        portable_source_files.append(":{}[{}]".format(copy_portable_source_files_genrule, op))

    # Copy portable header files.
    portable_header_files = {}
    copy_portable_header_files_genrule = name + "_copy_portable_header"
    copy_portable_header_files(copy_portable_header_files_genrule)
    for header in portable_header_list():
        portable_header_files[header] = ":{}[{}]".format(copy_portable_header_files_genrule, header)

    # Include dtype header.
    portable_header_files["selected_op_variants.h"] = ":{}[selected_op_variants]".format(oplist_header_name)

    # Build portable lib.
    runtime.cxx_library(
        name = name,
        srcs = portable_source_files,
        exported_headers = portable_header_files,
        exported_preprocessor_flags = ["-DEXECUTORCH_SELECTIVE_BUILD_DTYPE"],
        deps = ["//executorch/kernels/portable/cpu/pattern:all_deps", "//executorch/kernels/portable/cpu/util:all_deps"],
        # header_namespace is only available in xplat. See https://fburl.com/code/we2gvopk
        header_namespace = "executorch/kernels/portable/cpu",
        compiler_flags = ["-Wno-missing-prototypes"] +
                         # For a shared library build, we don't want to expose symbols of
                         # the kernel implementations (e.g. torch::executor::native::tanh_out)
                         # to library users. They should use kernels through the registry only.
                         # With visibility=hidden, the linker won't expose kernel impl symbols,
                         # so it can prune unregistered kernels.
                         # Currently fbcode links all dependent libraries through shared
                         # libraries, which blocks users such as unit tests from using kernel
                         # implementations directly. So we enable this for xplat only.
                         ["-fvisibility=hidden"],
        # WARNING: using a deprecated API to avoid being built into a shared
        # library. When dynamically loading an .so library, we don't want it to
        # depend on other .so libraries, because then we would have to specify
        # the library directory path.
        force_static = True,
        # link_whole is necessary because the operators register themselves
        # via static initializers that run at program startup.
        # @lint-ignore BUCKLINT link_whole
        link_whole = True,
        feature = feature,
    )
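
# For illustration (hypothetical names), build_portable_lib("mylib_portable_lib",
# "mylib_et_op_dtype_gen") emits a cxx_library named "mylib_portable_lib" whose
# sources are the copied portable kernels, compiled against the
# selected_op_variants.h generated for "mylib" so only the selected dtypes are built.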

def executorch_generated_lib(
        name,
        functions_yaml_target = None,
        custom_ops_yaml_target = None,
        fallback_yaml_target = None,
        define_static_targets = False,
        custom_ops_aten_kernel_deps = [],
        custom_ops_requires_runtime_registration = True,
        custom_ops_requires_aot_registration = True,
        visibility = [],
        aten_mode = False,
        manual_registration = False,
        use_default_aten_ops_lib = True,
        deps = [],
        xplat_deps = [],
        fbcode_deps = [],
        platforms = get_default_executorch_platforms(),
        compiler_flags = [],
        kernel_deps = [],
        dtype_selective_build = False,
        feature = None):
    """Emits 0-3 C++ library targets (in fbcode or xplat) containing code to
    dispatch the operators specified in the provided yaml files.

    Generates
    * `<name>` C++ library responsible for registering both ATen operators and
        custom ops into the Executorch runtime.
    * `custom_ops_<name>` C++ library responsible for registering custom ops
        into the PyTorch runtime.
    Args:
        name: The name of the C++ library target to emit. Also emits a
            header-only C++ library target named `<name>_headers` that declares
            the signatures for the C++ functions that map to the entries in
            `functions.yaml` and `custom_ops.yaml`.
            If `custom_ops_yaml_target` is specified, also emits:
            - `custom_ops_<name>`: A host-only C++ library that declares and
              registers the ops defined in that file. Clients can load this
              library into local PyTorch using `torch.ops.load_library()` to
              make them visible while authoring models.
        functions_yaml_target: A Buck target pointing to the `functions.yaml`
            file to use. Optional, but at least one of `functions_yaml_target`
            and `custom_ops_yaml_target` must be specified.
        custom_ops_yaml_target: A Buck target pointing to the `custom_ops.yaml`
            file to use. Optional, but at least one of `functions_yaml_target`
            and `custom_ops_yaml_target` must be specified.
        fallback_yaml_target: A Buck target pointing to the yaml file used for
            fallback purposes. If it exists, we will merge `functions.yaml` with
            the fallback yaml.
        define_static_targets: If True, defines extra "<name>_static" targets
            for each of the internal cxx_libraries defined by this macro, each
            with preferred_linkage="static". If False, does not define these
            targets.
        custom_ops_aten_kernel_deps: kernels for custom ops that can be registered
            into the PyTorch runtime. They need to depend on basic ATen types such
            as `at::Tensor` and `c10::ScalarType`. If not provided, fake kernels
            will be auto-generated for the custom ops.
        custom_ops_requires_runtime_registration: If False, don't generate the
            `<name>` target when `functions_yaml_target` is None. If True, always
            generate the `<name>` target, whether or not we have
            `functions_yaml_target`.
        aten_mode: a boolean for whether we should use ATen kernels and ATen tensors.
        visibility: Visibility of the C++ library targets.
        deps: Additional deps of the main C++ library. Needs to be in either the
            `//executorch` or `//caffe2` module.
        platforms: platforms args to runtime.cxx_library (only used when in xplat)
        manual_registration: if True, generate RegisterKernels.cpp and RegisterKernels.h.
        use_default_aten_ops_lib: If `aten_mode` is True AND this flag is True,
            use `torch_mobile_all_ops_et` for the ATen operator library.
        xplat_deps: Additional xplat deps, can be used to provide custom operator library.
        fbcode_deps: Additional fbcode deps, can be used to provide custom operator library.
        compiler_flags: compiler_flags args to runtime.cxx_library
        dtype_selective_build: In addition to operator selection, dtype selective
            build further selects the dtypes for each operator. Can be used with
            the model or dict selective build APIs, where dtypes can be specified.
            Note: this is only available in xplat.
        feature: Product-Feature Hierarchy (PFH). For internal use only, required
            for FoA in production. See: https://fburl.com/wiki/2wzjpyqy
    """
    if functions_yaml_target and aten_mode:
        fail("{} is providing functions_yaml_target in ATen mode, it will be ignored. `native_functions.yaml` will be the source of truth.".format(name))

    if not aten_mode and not functions_yaml_target and not custom_ops_yaml_target:
        fail("At least one of functions_yaml_target, custom_ops_yaml_target needs to be provided")

    aten_suffix = "_aten" if aten_mode else ""

    # merge functions.yaml with fallback yaml
    if functions_yaml_target:
        merge_yaml_name = name + "_merge_yaml"
        cmd = selects.apply(functions_yaml_target, lambda value: "$(exe fbsource//xplat/executorch/codegen/tools:merge_yaml) " +
                                                                 "--functions_yaml_path=$(location {}) --output_dir=$OUT ".format(value))
        if fallback_yaml_target:
            cmd = cmd + "--fallback_yaml_path=$(location {}) ".format(fallback_yaml_target)
        runtime.genrule(
            name = merge_yaml_name,
            macros_only = False,
            cmd = cmd,
            outs = {"merged.yaml": ["merged.yaml"]},
            default_outs = ["."],
            platforms = platforms,
        )
        functions_yaml_path = "$(location :{}[merged.yaml])".format(merge_yaml_name)
    else:
        functions_yaml_path = None
    if custom_ops_yaml_target:
        custom_ops_yaml_path = selects.apply(custom_ops_yaml_target, lambda value: "$(location {})".format(value))
    else:
        custom_ops_yaml_path = None

    genrules, libs = _prepare_genrule_and_lib(
        name = name,
        functions_yaml_path = functions_yaml_path,
        custom_ops_yaml_path = custom_ops_yaml_path,
        custom_ops_requires_runtime_registration = custom_ops_requires_runtime_registration,
        aten_mode = aten_mode,
        manual_registration = manual_registration,
    )

    # genrule for selective build from static operator list
    oplist_dir_name = name + "_pt_oplist"
    runtime.genrule(
        name = oplist_dir_name,
        macros_only = False,
        cmd = ("$(exe fbsource//xplat/executorch/codegen/tools:gen_all_oplist) " +
               "--model_file_list_path $(@query_outputs 'attrfilter(labels, et_operator_library, deps(set({deps})))') " +
               "--allow_include_all_overloads " +
               "--output_dir $OUT ").format(deps = " ".join(["\"{}\"".format(d) for d in deps])),
        outs = {"selected_operators.yaml": ["selected_operators.yaml"]},
        default_outs = ["."],
        platforms = platforms,
    )

    # genrule to generate selected_op_variants.h from selected_operators.yaml above
    oplist_header_name = name + "_et_op_dtype_gen"
    runtime.genrule(
        name = oplist_header_name,
        macros_only = False,
        cmd = ("$(exe //executorch/codegen/tools:gen_selected_op_variants) " +
               "--yaml_file_path $(location :{}[selected_operators.yaml]) " +
               "--output_dir $OUT").format(oplist_dir_name),
        outs = {"selected_op_variants": ["selected_op_variants.h"]},
        default_outs = ["."],
        platforms = platforms,
        visibility = visibility,
        _is_external_target = True,
    )
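
    # Illustrative note: for a target named "mylib", the two genrules above
    # expose ":mylib_pt_oplist[selected_operators.yaml]" and
    # ":mylib_et_op_dtype_gen[selected_op_variants]". The former is appended to
    # each codegen command below; the latter feeds the dtype-selective portable
    # build.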

    # codegen genrule(s). For ATen mode we expect two genrules, one for ATen
    # ops and one for custom ops.
    for genrule_name in genrules:
        genrules[genrule_name]["cmd"].append(
            "--op_selection_yaml_path=$(location :{}[selected_operators.yaml])".format(oplist_dir_name),
        )
        my_cmd = ""
        for rule_substr in genrules[genrule_name]["cmd"]:
            if my_cmd != "":
                my_cmd += " "
            my_cmd += rule_substr
        runtime.genrule(
            name = genrule_name,
            cmd = my_cmd,
            outs = {f: [f] for f in genrules[genrule_name]["outs"]},
            default_outs = ["."],
            platforms = platforms,
        )

    portable_lib = []
    if dtype_selective_build and is_xplat() and "//executorch/kernels/portable:operators" in kernel_deps:
        # Remove portable from kernel_deps as we're building it from source.
        kernel_deps.remove("//executorch/kernels/portable:operators")

        # Build portable lib.
        portable_lib_name = name + "_portable_lib"
        build_portable_lib(portable_lib_name, oplist_header_name, feature)
        portable_lib = [":{}".format(portable_lib_name)]
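
    # Illustrative note: with dtype_selective_build on xplat, the portable
    # operators dep in kernel_deps has been swapped for the from-source
    # "<name>_portable_lib" target above, compiled against selected_op_variants.h
    # so that only the selected dtypes are built.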

    # Exports headers that declare the function signatures of the C++ functions
    # that map to entries in `functions.yaml` and `custom_ops.yaml`.
    # For ATen mode, the headers will be `aten_Functions.h`, `aten_NativeFunctions.h` and `aten_UnboxingFunctions.h`
    # along with headers declaring custom ops `Functions.h`, `NativeFunctions.h` and `UnboxingFunctions.h`.
    header_lib = name + "_headers"
    if header_lib in libs:
        runtime.cxx_library(
            name = header_lib,
            srcs = [],
            exported_headers = libs[header_lib]["headers"],
            visibility = visibility,
            # Relax visibility restrictions since deps may include targets
            # outside of //executorch.
            _is_external_target = True,
            platforms = platforms,
            compiler_flags = compiler_flags,
            exported_deps = [
                "//executorch/codegen:macros",
                "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix,
            ],
            feature = feature,
        )

    if name in libs:
        lib_name = name
        runtime.cxx_library(
            name = lib_name,
            srcs = [
                ":{}[{}]".format(libs[lib_name]["genrule"], f)
                for f in libs[lib_name]["srcs"]
            ],
            # Note that all of these generated headers are only used by this library
            # target, and are not meant to be used by targets outside of this
            # directory.
            headers = libs[lib_name]["headers"],
            exported_headers = libs[lib_name]["exported_headers"],
            exported_preprocessor_flags = ["-DUSE_ATEN_LIB"] if aten_mode else [],
            # link_whole is necessary because the operators register themselves via
            # static initializers that run at program startup.
            # @lint-ignore BUCKLINT link_whole
            link_whole = True,
            visibility = visibility,
            # Operator Registration is done through static tables
            compiler_flags = select({
                "DEFAULT": ["-Wno-global-constructors"],
                "ovr_config//os:windows": [],
            }) + compiler_flags,
            deps = [
                "//executorch/runtime/kernel:operator_registry",
                "//executorch/kernels/prim_ops:prim_ops_registry" + aten_suffix,
                "//executorch/runtime/core:evalue" + aten_suffix,
                "//executorch/codegen:macros",
            ] + deps + kernel_deps + portable_lib,
            exported_deps = [
                "//executorch/runtime/core/exec_aten:lib" + aten_suffix,
                "//executorch/runtime/kernel:kernel_runtime_context" + aten_suffix,
            ],
            xplat_deps = xplat_deps,
            fbcode_deps = fbcode_deps,
            external_deps = ["libtorch"] if aten_mode and use_default_aten_ops_lib else [],
            define_static_target = define_static_targets,
            # Relax visibility restrictions since deps may include targets outside
            # of //executorch.
            _is_external_target = True,
            platforms = platforms,
            feature = feature,
        )

    if custom_ops_yaml_target and custom_ops_requires_aot_registration:
        exir_custom_ops_aot_lib(
            name = "custom_ops_" + name,
            yaml_target = custom_ops_yaml_target,
            visibility = visibility,
            kernels = custom_ops_aten_kernel_deps,
            deps = deps + [":" + header_lib],
            define_static_target = define_static_targets,
            platforms = platforms,
        )
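
# A minimal usage sketch of executorch_generated_lib (hypothetical targets, for
# illustration only), combining an et_operator_library selection with the
# portable kernels:
#
#     executorch_generated_lib(
#         name = "generated_lib",
#         functions_yaml_target = ":functions.yaml",
#         deps = [
#             ":select_ops_in_list",  # an et_operator_library target
#             "//executorch/kernels/portable:operators",
#         ],
#     )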