Commit f9e6d76

jerryzh168 authored and facebook-github-bot committed
[quant][pt2e] change internal code to only import from _quantize_pt2e (pytorch#105162)
Summary:
Pull Request resolved: pytorch#105162
X-link: pytorch/executorch#7

This makes the public API clear, so that we can change implementation details more easily in the future.

Test Plan: CIs

Reviewed By: andrewor14

Differential Revision: D47445767

fbshipit-source-id: 008e643a9c8996f7c6846abee1c15a72f6fb4c7d
1 parent fc2f87b commit f9e6d76
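
For orientation, a minimal sketch of the import change this commit asks internal code to make. Only the Quantizer name is taken from the diffs below; the other re-exported names move the same way.

# Before: internal code imported from the implementation package directly.
from torch.ao.quantization._pt2e.quantizer import Quantizer

# After: internal code imports only from the public-facing module, which
# now re-exports the quantizer names (see the diff to
# torch/ao/quantization/_quantize_pt2e.py below).
from torch.ao.quantization._quantize_pt2e import Quantizer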

File tree: 3 files changed (+31, -1 lines)


torch/ao/quantization/_pt2e/quantizer/__init__.py

Lines changed: 4 additions & 0 deletions

@@ -4,11 +4,13 @@
     EdgeOrNode,
     FixedQParamsQuantizationSpec,
     OperatorConfig,
+    OperatorPatternType,
     QuantizationAnnotation,
     QuantizationSpec,
     QuantizationSpecBase,
     Quantizer,
     SharedQuantizationSpec,
+    QuantizationConfig,
 )
 from .x86_inductor_quantizer import X86InductorQuantizer

@@ -19,6 +21,8 @@
     "ComposableQuantizer",
     "EdgeOrNode",
     "OperatorConfig",
+    "OperatorPatternType",
+    "QuantizationConfig",
     "EmbeddingQuantizer",
     "Quantizer",
     "QNNPackQuantizer",

torch/ao/quantization/_pt2e/quantizer/quantizer.py

Lines changed: 2 additions & 0 deletions

@@ -16,6 +16,8 @@
     "SharedQuantizationSpec",
     "DerivedQuantizationSpec",
     "QuantizationAnnotation",
+    "QuantizationConfig",
+    "OperatorPatternType",
 ]

 # TODO: maybe remove torch.float32

torch/ao/quantization/_quantize_pt2e.py

Lines changed: 25 additions & 1 deletion

@@ -16,7 +16,31 @@
 from .fx.prepare import prepare as fx_prepare
 from .quantize_fx import _convert_to_reference_decomposed_fx
 from torch.ao.quantization import QConfigMapping
-from torch.ao.quantization._pt2e.quantizer import Quantizer
+# TODO: move quantizer to torch.ao.quantization
+from torch.ao.quantization._pt2e.quantizer import (  # noqa: F401
+    OperatorConfig,
+    OperatorPatternType,
+    QuantizationConfig,
+    Quantizer,
+    QuantizationSpecBase,
+    QuantizationSpec,
+    FixedQParamsQuantizationSpec,
+    SharedQuantizationSpec,
+    DerivedQuantizationSpec,
+    QuantizationAnnotation,
+    QNNPackQuantizer,
+    EmbeddingQuantizer,
+    ComposableQuantizer,
+)
+from torch.ao.quantization._pt2e.quantizer.utils import (  # noqa: F401
+    get_bias_qspec,
+    get_input_act_qspec,
+    get_output_act_qspec,
+    get_weight_qspec,
+)
+from torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer import (  # noqa: F401
+    get_symmetric_quantization_config,
+)
 from torch.ao.quantization.backend_config import BackendConfig

 from typing import Any, Tuple
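
To illustrate what these re-exports enable, a short hedged sketch follows. It assumes the QNNPackQuantizer.set_global method and the behavior of get_symmetric_quantization_config and get_input_act_qspec as they existed around this commit; treat it as a sketch under those assumptions, not a definitive workflow.

# Everything below is reachable through _quantize_pt2e alone, without
# touching the _pt2e implementation package.
from torch.ao.quantization._quantize_pt2e import (
    QNNPackQuantizer,
    get_input_act_qspec,
    get_symmetric_quantization_config,
)

# Build a symmetric quantization config and install it as the quantizer's
# global config (set_global is assumed to exist on QNNPackQuantizer at
# this commit).
quantization_config = get_symmetric_quantization_config()
quantizer = QNNPackQuantizer()
quantizer.set_global(quantization_config)

# The get_*_qspec helpers read individual specs back out of a
# QuantizationConfig, e.g. the input-activation spec:
input_act_qspec = get_input_act_qspec(quantization_config)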
