/aosp_15_r20/external/pytorch/test/quantization/fx/

test_numeric_suite_fx.py
    21    prepare_fx,
    317   …mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, …
    344   …mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, …
    362   …mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(4, 4),…
    385   …mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, …
    400   … mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
    413   … mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
    414   … mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
    424   … mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
    426   … mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
    [all …]

test_quantize_fx.py
    19    prepare_fx,
    557   m = prepare_fx(m, qconfig_mapping,
    656   prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    691   m = prepare_fx(model, qconfig_dict, example_inputs=(torch.randn(1, 5),))
    1609  prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    1789  m = prepare_fx(m, qconfig_dict, example_inputs=inputs)
    1852  m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    1887  m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    1957  m = prepare_fx(
    1974  ref_m = prepare_fx(
    [all …]

test_equalize_fx.py
    10    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
    278   prepared = prepare_fx(
    314   prepared = prepare_fx(
    338   prepared = prepare_fx(
    368   prepared = prepare_fx(
    379   prepared = prepare_fx(
    431   prepared = prepare_fx(
    484   prepared = prepare_fx(
    544   prepared = prepare_fx(
    783   prepared = prepare_fx(
    [all …]

test_model_report_fx.py
    123   …model_prep = torch.ao.quantization.quantize_fx.prepare_fx(model, q_config_mapping, input) # prep …
    821   model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
    895   … model_prep = quantize_fx.prepare_fx(model, q_config_mapping, model.get_example_inputs()[0])
    944   model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
    1046  model_prep_full = quantize_fx.prepare_fx(model_full, q_config_mapping, example_input)
    1047  … model_prep_single = quantize_fx.prepare_fx(model_single, q_config_mapping, example_input)
    1201  prepared = quantize_fx.prepare_fx(TwoThreeOps(), qconfig_mapping, example_input)
    1245  prepared = quantize_fx.prepare_fx(
    1951  model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
/aosp_15_r20/external/executorch/exir/tests/

test_quant_fusion_pass.py
    27    prepare_fx,
    52    m = prepare_fx(
    90    m = prepare_fx(
    146   m = prepare_fx(
    192   m = prepare_fx(
    284   m = prepare_fx(
    340   m = prepare_fx(

test_quantization.py
    20    from torch.ao.quantization.quantize_fx import prepare_fx
    99    m_fx = prepare_fx(

test_memory_planning.py
    48    prepare_fx,
    447   prepare_fx(
    460   prepare_fx(
/aosp_15_r20/external/pytorch/test/mobile/

test_quantize_fx_lite_script_module.py
    10    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
    43    m = prepare_fx(
    65    m = prepare_fx(m, qconfig_dict, example_inputs=torch.randn(1, 1, 1, 1))
    84    model = prepare_fx(
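The hits above come from the lite-interpreter tests, which quantize a module with FX graph mode and then script it for mobile deployment. A minimal sketch of that flow, assuming a stand-in model, qconfig mapping choice, and output path (none of these are taken from the test file):

```python
import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx

# Stand-in model and inputs; the actual tests use their own fixtures.
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 1, 4, 4),)

m = prepare_fx(m, get_default_qconfig_mapping("qnnpack"), example_inputs)
m(*example_inputs)  # calibration pass
m = convert_fx(m)

# Script the quantized GraphModule and save it in the lite interpreter format.
scripted = torch.jit.script(m)
scripted._save_for_lite_interpreter("quantized_model.ptl")
```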
/aosp_15_r20/external/pytorch/test/ao/sparsity/

test_composability.py
    14    prepare_fx,
    362   mod = prepare_fx(mod, qconfig_mapping, (example,))
    426   mod = prepare_fx(mod, qconfig_mapping, (example,))
    493   mod = prepare_fx(mod, qconfig_mapping, (example,))
    598   mod = prepare_fx(mod, qconfig_mapping, (example,))
/aosp_15_r20/external/executorch/backends/xnnpack/test/

test_xnnpack_utils.py
    67    prepare_fx,
    288   prepared = prepare_fx(
    393   prepared_linear = prepare_fx(
    486   prepared_linear = prepare_fx(
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/

README.md
    8     (prepare_fx/prepare_qat_fx) /
    34    float_model:::nofs --> prepare_fx:::sub
    35    QConfigMapping:::nofs --> prepare_fx
    36    BackendConfig:::nofs --> prepare_fx
    37    subgraph prepare_fx["`_(prepare_fx/prepare_qat_fx)_`"]
    40    prepare_fx --> Calibrate/Train:::nofs --> convert_fx:::sub
    54    `prepare_fx`:
    138   …s exactly the same as eager mode quantization, just called inside the `prepare_fx/prepare_qat_fx` …
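The README.md entries above outline the FX graph mode flow: a float model plus a QConfigMapping (and optionally a BackendConfig) go into prepare_fx, the prepared model is calibrated or trained, and convert_fx produces the quantized model. A minimal sketch of that flow using the default qconfig mapping; the module and calibration loop are illustrative, not from the README:

```python
import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx


class SmallNet(torch.nn.Module):  # stand-in float model
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.relu(self.linear(x))


float_model = SmallNet().eval()
example_inputs = (torch.randn(1, 4),)
qconfig_mapping = get_default_qconfig_mapping("fbgemm")

# prepare_fx: symbolically trace the model and insert observers per the mapping
prepared = prepare_fx(float_model, qconfig_mapping, example_inputs)

# Calibrate: run representative data so the observers record activation ranges
with torch.no_grad():
    for _ in range(8):
        prepared(torch.randn(1, 4))

# convert_fx: replace observed patterns with quantized modules/ops
quantized = convert_fx(prepared)
```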
lstm_utils.py
    15    from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx
    102   cell = prepare_fx(cell, cell_qm, example_inputs, backend_config=backend_config)
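lstm_utils.py passes an explicit backend_config to prepare_fx and lowers with convert_to_reference_fx. A sketch of that pattern on a stand-in module, assuming the native backend config; only the backend_config and convert_to_reference_fx usage mirrors the file:

```python
import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx

# Stand-in module; lstm_utils.py applies this per split LSTM cell instead.
model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(2, 8),)

# Assumption: the native backend config; the real file builds/receives its own.
backend_config = get_native_backend_config()

prepared = prepare_fx(
    model,
    get_default_qconfig_mapping(),
    example_inputs,
    backend_config=backend_config,  # restricts which patterns/dtypes are quantized
)
prepared(*example_inputs)  # calibration
reference = convert_to_reference_fx(prepared, backend_config=backend_config)
```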
/aosp_15_r20/external/executorch/exir/backend/test/demos/

test_xnnpack_qnnpack.py
    41    prepare_fx,
    73    prepared_mod = prepare_fx(
/aosp_15_r20/external/executorch/examples/xnnpack/quantization/

example.py
    26    prepare_fx,
    78    m_fx = prepare_fx(
/aosp_15_r20/external/executorch/extension/pybindings/test/

make_test.py
    256   prepare_fx,
    264   m = prepare_fx(
/aosp_15_r20/external/pytorch/test/quantization/pt2e/

test_xnnpack_quantizer.py
    26    prepare_fx,
    763   model_fx = prepare_fx(
    827   model_fx = prepare_fx(
    1037  m_fx = prepare_fx(
/aosp_15_r20/external/pytorch/torch/quantization/

quantize_fx.py
    21    prepare_fx,
/aosp_15_r20/external/pytorch/test/quantization/core/experimental/

apot_fx_graph_mode_qat.py
    30    …prepared_model = prepare_fx(copy.deepcopy(float_model), qconfig_dict) # fuse modules and insert o…
/aosp_15_r20/external/pytorch/docs/source/

quantization.rst
    436   …and the configuration is done through `qconfig_mapping` (an argument of the `prepare_fx` function).
    462   model_prepared = quantize_fx.prepare_fx(model_to_quantize, qconfig_mapping, example_inputs)
    475   model_prepared = quantize_fx.prepare_fx(model_to_quantize, qconfig_mapping, example_inputs)
    1067  mp = torch.ao.quantization.quantize_fx.prepare_fx(
    1197  prepare_orig = prepare_fx(m, {'' : default_qconfig})
    1206  prepared = prepare_fx(m2, {'' : default_qconfig})
    1217  prepare_orig = prepare_fx(m, {'' : default_qconfig})
    1230  …rch Model that is not symbolically traceable to `torch.ao.quantization.prepare_fx` or `torch.ao.qu…
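The quantization.rst hits note that configuration is done through qconfig_mapping, the second argument of prepare_fx (some of the hits above still pass the older plain-dict form). A sketch of building a QConfigMapping with global, per-type, and per-module-name entries; the model and the choice of which modules to skip are purely illustrative:

```python
import torch
from torch.ao.quantization import QConfigMapping, get_default_qconfig
from torch.ao.quantization.quantize_fx import prepare_fx

# Stand-in model; module names "0"/"1"/"2" come from nn.Sequential.
model_to_quantize = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3),
    torch.nn.ReLU(),
    torch.nn.Conv2d(8, 8, 3),
).eval()
example_inputs = (torch.randn(1, 3, 16, 16),)

qconfig_mapping = (
    QConfigMapping()
    .set_global(get_default_qconfig("fbgemm"))  # default for the whole model
    .set_object_type(torch.nn.ReLU, None)       # leave ReLU unconfigured
    .set_module_name("2", None)                 # skip the second conv by name
)

model_prepared = prepare_fx(model_to_quantize, qconfig_mapping, example_inputs)
```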
quantization-support.rst
    65    prepare_fx
/aosp_15_r20/external/pytorch/torch/ao/quantization/

quantize_fx.py
    252   def prepare_fx(  [function definition]
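quantize_fx.py defines prepare_fx and its QAT counterpart prepare_qat_fx (see the fx/README.md entry above). A hedged sketch of the QAT path; the model and the elided fine-tuning loop are illustrative, not from this file:

```python
import torch
from torch.ao.quantization import get_default_qat_qconfig_mapping
from torch.ao.quantization.quantize_fx import convert_fx, prepare_qat_fx

# Stand-in model; prepare_qat_fx expects a model in training mode.
model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).train()
example_inputs = (torch.randn(1, 4),)

prepared = prepare_qat_fx(
    model, get_default_qat_qconfig_mapping("fbgemm"), example_inputs
)

# ... fine-tune `prepared` for a few steps here (fake-quant is active) ...

quantized = convert_fx(prepared.eval())
```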
/aosp_15_r20/external/executorch/exir/backend/test/

test_backends.py
    60    prepare_fx,
    995   prepared_linear = prepare_fx(

test_backends_lifted.py
    63    prepare_fx,
    979   prepared_linear = prepare_fx(
/aosp_15_r20/external/pytorch/test/quantization/bc/

test_backward_compatibility.py
    239   mp = quantize_fx.prepare_fx(m, {"": qconfig}, example_inputs=example_inputs)
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/_model_report/

README.md
    27    prepared_model = quantize_fx.prepare_fx(model, qconfig_mapping, example_input)