Skip to content

Commit

Permalink
compress_weights telemetry update (#3011)
Browse files Browse the repository at this point in the history
### Changes

- Added telemetry for the `nncf.compress_weights` backend-specific
`impl`;
- Added missing wrapper for the `nncf.quantize` PyTorch `impl`;
- Fixed reported event for `nncf.quantize_with_accuracy_control`
OpenVINO `impl`.
- Added missing `app_name`, `app_version` fields for each event.

### Reason for changes

- Telemetry update.

### Related tickets

- 154833

### Tests

- TBD
  • Loading branch information
KodiaqQ authored Oct 18, 2024
1 parent 4e388d2 commit 7385c41
Show file tree
Hide file tree
Showing 5 changed files with 54 additions and 4 deletions.
17 changes: 16 additions & 1 deletion nncf/openvino/quantization/quantize_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,9 @@
from nncf.quantization.quantize_model import is_model_no_batchwise_support
from nncf.quantization.quantize_model import quantize_with_tune_hyperparams
from nncf.quantization.quantize_model import warning_model_no_batchwise_support
from nncf.quantization.telemetry_extractors import CompressionStartedWithCompressWeightsApi
from nncf.quantization.telemetry_extractors import CompressionStartedWithQuantizeApi
from nncf.quantization.telemetry_extractors import CompressionStartedWithQuantizeWithAccuracyControlApi
from nncf.scopes import IgnoredScope
from nncf.scopes import validate_ignored_scope
from nncf.telemetry.decorator import tracked_function
Expand Down Expand Up @@ -190,7 +192,8 @@ def native_quantize_impl(


@tracked_function(
NNCF_OV_CATEGORY, [CompressionStartedWithQuantizeApi(), "target_device", "preset", "max_drop", "drop_type"]
NNCF_OV_CATEGORY,
[CompressionStartedWithQuantizeWithAccuracyControlApi(), "target_device", "preset", "max_drop", "drop_type"],
)
def quantize_with_accuracy_control_impl(
model: ov.Model,
Expand Down Expand Up @@ -366,6 +369,18 @@ def quantize_impl(
)


@tracked_function(
NNCF_OV_CATEGORY,
[
CompressionStartedWithCompressWeightsApi(),
"mode",
"awq",
"scale_estimation",
"gptq",
"lora_correction",
"backup_mode",
],
)
def compress_weights_impl(
model: ov.Model,
dataset: Dataset,
Expand Down
5 changes: 5 additions & 0 deletions nncf/quantization/telemetry_extractors.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,3 +23,8 @@ def extract(self, _: Any) -> CollectedEvent:
class CompressionStartedWithQuantizeWithAccuracyControlApi(TelemetryExtractor):
    """Emits a 'compression_started' event attributed to the quantize-with-accuracy-control API."""

    def extract(self, _: Any) -> CollectedEvent:
        # The argument value is irrelevant here; the event only marks which API was entered.
        event = CollectedEvent(name="compression_started", data="quantize_with_accuracy_control_api")
        return event


class CompressionStartedWithCompressWeightsApi(TelemetryExtractor):
    """Emits a 'compression_started' event attributed to the compress-weights API."""

    def extract(self, _: Any) -> CollectedEvent:
        # The argument value is irrelevant here; the event only marks which API was entered.
        event = CollectedEvent(name="compression_started", data="compress_weights_api")
        return event
2 changes: 2 additions & 0 deletions nncf/telemetry/extractors.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,4 +54,6 @@ class VerbatimTelemetryExtractor(TelemetryExtractor):
def extract(self, argvalue: SerializableData) -> CollectedEvent:
    """Forward *argvalue* as event data, normalizing special types to strings.

    Enum members are reported by their underlying value; booleans are mapped
    to the human-readable flags "enabled"/"disabled". The two branches are
    mutually exclusive: no object is both a bool and an Enum, and the string
    produced from an Enum is never a bool.
    """
    data = argvalue
    if isinstance(data, Enum):
        # Report the enum's value, not its repr.
        data = str(data.value)
    elif isinstance(data, bool):
        # Booleans are reported as on/off flags.
        data = "enabled" if data else "disabled"
    return CollectedEvent(name=self._argname, data=data)
17 changes: 14 additions & 3 deletions nncf/telemetry/wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,10 +92,12 @@ class NNCFTelemetry(ITelemetry):
MEASUREMENT_ID = "G-W5E9RNLD4H"

def __init__(self):
self._app_name = "nncf"
self._app_version = __version__
try:
self._impl = Telemetry(
app_name="nncf",
app_version=__version__,
app_name=self._app_name,
app_version=self._app_version,
tid=self.MEASUREMENT_ID,
backend="ga4",
enable_opt_in_dialog=False,
Expand All @@ -121,7 +123,16 @@ def send_event(
):
if event_value is None:
event_value = 1
self._impl.send_event(event_category, event_action, event_label, event_value, force_send, **kwargs)
self._impl.send_event(
event_category=event_category,
event_action=event_action,
event_label=event_label,
event_value=event_value,
app_name=self._app_name,
app_version=self._app_version,
force_send=force_send,
**kwargs,
)

@skip_if_raised
def end_session(self, category: str, **kwargs):
Expand Down
17 changes: 17 additions & 0 deletions nncf/torch/quantization/quantize_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,18 @@
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from nncf.quantization.algorithms.weight_compression.algorithm import WeightCompression
from nncf.quantization.quantize_model import warning_model_no_batchwise_support
from nncf.quantization.telemetry_extractors import CompressionStartedWithCompressWeightsApi
from nncf.quantization.telemetry_extractors import CompressionStartedWithQuantizeApi
from nncf.scopes import IgnoredScope
from nncf.telemetry.decorator import tracked_function
from nncf.telemetry.events import NNCF_PT_CATEGORY
from nncf.torch.graph.operator_metatypes import OPERATIONS_OUTPUT_HAS_NO_BATCH_AXIS
from nncf.torch.model_creation import wrap_model

DEFAULT_RANGE_TYPE = "mean_min_max"


@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedWithQuantizeApi(), "target_device", "preset"])
def quantize_impl(
model: torch.nn.Module,
calibration_dataset: Dataset,
Expand Down Expand Up @@ -81,6 +86,18 @@ def quantize_impl(
return quantized_model


@tracked_function(
NNCF_PT_CATEGORY,
[
CompressionStartedWithCompressWeightsApi(),
"mode",
"awq",
"scale_estimation",
"gptq",
"lora_correction",
"backup_mode",
],
)
def compress_weights_impl(
model: torch.nn.Module,
dataset: Dataset,
Expand Down

0 comments on commit 7385c41

Please sign in to comment.