Skip to content

Commit

Permalink
Deprecate quantsim util APIs
Browse files Browse the repository at this point in the history
Signed-off-by: Kyunggeun Lee <[email protected]>
  • Loading branch information
quic-kyunggeu committed Dec 18, 2024
1 parent 5e9ede4 commit 2f76988
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 1 deletion.
Original file line number Diff line number Diff line change
Expand Up @@ -1735,6 +1735,7 @@ def run_modules_for_traced_custom_marker(self, module_list: List[torch.nn.Module



@deprecated("Use pickle.dump instead")
def save_checkpoint(quant_sim_model: _QuantizationSimModelInterface, file_path: str):
"""
This API provides a way for the user to save a checkpoint of the quantized model which can
Expand All @@ -1749,6 +1750,7 @@ def save_checkpoint(quant_sim_model: _QuantizationSimModelInterface, file_path:
pickle.dump(quant_sim_model, file)


@deprecated("Use pickle.load instead")
def load_checkpoint(file_path: str) -> _QuantizationSimModelInterface:
"""
Load the quantized model
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -532,7 +532,17 @@ def _remove_quantization_wrappers(cls, starting_module, list_of_modules_to_exclu
cls._remove_quantization_wrappers(module, list_of_modules_to_exclude)


@deprecated("Use QuantizationSimModel.load_encodings instead.")
@deprecated("""
Use QuantizationSimModel.load_encodings with the following keyword arguments instead:
```
sim.load_encodings(encoding_path,
strict=True,
partial=False,
requires_grad=None,
allow_overwrite=None)
```
"""
)
def load_encodings_to_sim(quant_sim_model: _QuantizationSimModelBase, pytorch_encoding_path: str):
"""
Loads the saved encodings to quant sim model. The encoding filename to load should end in _torch.encodings,
Expand All @@ -549,6 +559,17 @@ def load_encodings_to_sim(quant_sim_model: _QuantizationSimModelBase, pytorch_en
allow_overwrite=None)


@deprecated(r"""
Use aimet_torch.v2.nn.compute_encodings contextmanager on each sim.model instead. For example:
```
with torch.no_grad(), \
aimet_torch.v2.nn.compute_encodings(sim_0.model), \
aimet_torch.v2.nn.compute_encodings(sim_1.model), \
aimet_torch.v2.nn.compute_encodings(sim_2.model):
# Run forward pass with calibration dataset
```
"""
)
def compute_encodings_for_sims(sim_list: Sequence[QuantizationSimModel], forward_pass_callback: Callable,
forward_pass_callback_args: Any):
"""
Expand Down

0 comments on commit 2f76988

Please sign in to comment.