Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a flag protected pass to lower fake_quant annotation. #415

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions tflite/python/convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -447,6 +447,7 @@ def build_conversion_flags(
use_buffer_offset=False,
reduce_type_precision=False,
qdq_conversion_mode=None,
strict_qdq_mode=False,
disable_per_channel_quantization_for_dense_layers=False,
enable_composite_direct_lowering=False,
model_origin_framework=lite_constants.UNSET,
Expand Down Expand Up @@ -578,6 +579,9 @@ def build_conversion_flags(
This could have side effects, e.g. reduced flatbuffer size.
qdq_conversion_mode: If set, assume input model is a quantized model
represented with QDQ ops and convert to quantized kernels.
strict_qdq_mode: If set, adheres to the QDQ annotations added by the
framework when possible, rather than quantizing every op that can be
quantized.
disable_per_channel_quantization_for_dense_layers: If set, disables per
channel and enables per tensor integer quantization for weights in Dense
layers. The flag works only for integer quantized model.
Expand Down Expand Up @@ -706,6 +710,7 @@ def build_conversion_flags(
conversion_flags.reduce_type_precision = reduce_type_precision
if qdq_conversion_mode is not None:
conversion_flags.qdq_conversion_mode = qdq_conversion_mode
conversion_flags.strict_qdq_mode = strict_qdq_mode
conversion_flags.disable_per_channel_quantization_for_dense_layers = (
disable_per_channel_quantization_for_dense_layers
)
Expand Down
2 changes: 2 additions & 0 deletions tflite/python/lite.py
Original file line number Diff line number Diff line change
Expand Up @@ -680,6 +680,7 @@ def __init__(self):
self._experimental_enable_composite_direct_lowering = False
self.model_origin_framework = constants.UNSET
self.canonicalizing_inf_as_min_max_float = True
self._experimental_strict_qdq = False

# Debug parameters
self.ir_dump_dir = None
Expand Down Expand Up @@ -837,6 +838,7 @@ def _get_base_converter_args(self):
self.experimental_stablehlo_quantizer_config
),
"qdq_conversion_mode": self._experimental_qdq_conversion_mode,
"strict_qdq_mode": self._experimental_strict_qdq,
"disable_per_channel_quantization_for_dense_layers": (
self._experimental_disable_per_channel_quantization_for_dense_layers
),
Expand Down