diff --git a/tests/post_training/pipelines/image_classification_base.py b/tests/post_training/pipelines/image_classification_base.py index 1026a5cd842..fbdcb218386 100644 --- a/tests/post_training/pipelines/image_classification_base.py +++ b/tests/post_training/pipelines/image_classification_base.py @@ -77,6 +77,13 @@ def _validate_torch_compile( self, val_loader: torch.utils.data.DataLoader, predictions: np.ndarray, references: np.ndarray ): # compiled_model = torch.compile(self.compressed_model, backend="openvino") + q_num = 0 + for node in self.compressed_model.graph.nodes: + if ".quantize_per" in str(node.target): + q_num += 1 + + print(f"Quantize ops num: {q_num}") + compiled_model = torch.compile(self.compressed_model) for i, (images, target) in enumerate(val_loader): # W/A for memory leaks when using torch DataLoader and OpenVINO @@ -111,6 +118,8 @@ def _compress_torch_native(self): os.environ["TORCHINDUCTOR_FREEZING"] = "1" + from itertools import islice + from torch.ao.quantization.quantize_pt2e import convert_pt2e from torch.ao.quantization.quantize_pt2e import prepare_pt2e from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer @@ -120,7 +129,8 @@ def _compress_torch_native(self): quantizer.set_global(get_default_x86_inductor_quantization_config()) prepared_model = prepare_pt2e(self.model, quantizer) - for data in self.calibration_dataset.get_inference_data(): + subset_size = self.compression_params.get("subset_size", 300) + for data in islice(self.calibration_dataset.get_inference_data(), subset_size): prepared_model(data) self.compressed_model = convert_pt2e(prepared_model) @@ -146,5 +156,5 @@ def _compress(self): if self.backend != BackendType.FX_TORCH: super()._compress() - # self._compress_torch_native() - self._compress_nncf_pt2e() + self._compress_torch_native() + # self._compress_nncf_pt2e() diff --git a/tests/post_training/test_quantize_conformance.py b/tests/post_training/test_quantize_conformance.py index 
5c4fa176ad6..40e6298baf3 100644 --- a/tests/post_training/test_quantize_conformance.py +++ b/tests/post_training/test_quantize_conformance.py @@ -270,7 +270,7 @@ def test_ptq_quantization( err_msg = None test_model_param = None start_time = time.perf_counter() - try: + if True: if test_case_name not in ptq_reference_data: raise nncf.ValidationError(f"{test_case_name} does not exist in 'reference_data.yaml'") test_model_param = PTQ_TEST_CASES[test_case_name] @@ -295,9 +295,6 @@ def test_ptq_quantization( ) pipeline: BaseTestPipeline = pipeline_cls(**pipeline_kwargs) pipeline.run() - except Exception as e: - err_msg = str(e) - traceback.print_exc() if pipeline is not None: pipeline.cleanup_cache()