[Conformance][TorchFX] --validate-in-backend option
daniil-lyakhov committed Oct 23, 2024
1 parent e1c1fba commit 16f3b97
Showing 4 changed files with 50 additions and 10 deletions.
tests/post_training/conftest.py (5 additions, 0 deletions)
@@ -19,6 +19,11 @@ def pytest_addoption(parser):
parser.addoption("--fp32", action="store_true", help="Test original model")
parser.addoption("--cuda", action="store_true", help="Enable CUDA_TORCH backend")
parser.addoption("--benchmark", action="store_true", help="Run benchmark_app")
parser.addoption(
"--validate-in-backend",
action="store_true",
help="Validate quantized model in native backend, not in openvino.",
)
parser.addoption(
"--extra-columns",
action="store_true",
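The new flag follows the suite's existing store_true pattern. A minimal sketch of how such an option travels from pytest_addoption to a test, with a hypothetical consumer test; note that pytest exposes --validate-in-backend under the underscored name validate_in_backend:

```python
# conftest.py -- same registration pattern as the diff above
def pytest_addoption(parser):
    parser.addoption(
        "--validate-in-backend",
        action="store_true",  # False unless the flag is passed
        help="Validate quantized model in native backend, not in openvino.",
    )


# test_example.py -- hypothetical consumer; pytest maps dashes to underscores
def test_reads_flag(pytestconfig):
    validate_in_backend = pytestconfig.getoption("validate_in_backend")
    assert isinstance(validate_in_backend, bool)
```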
tests/post_training/pipelines/base.py (2 additions, 0 deletions)
@@ -211,6 +211,7 @@ def __init__(
reference_data: dict,
no_eval: bool,
run_benchmark_app: bool,
validate_in_backend: bool = False,
params: dict = None,
batch_size: int = 1,
memory_monitor: bool = False,
@@ -227,6 +228,7 @@ def __init__(
self.memory_monitor = memory_monitor
self.no_eval = no_eval
self.run_benchmark_app = run_benchmark_app
self.validate_in_backend = validate_in_backend
self.output_model_dir: Path = self.output_dir / self.reported_name / self.backend.value
self.output_model_dir.mkdir(parents=True, exist_ok=True)
self.model_name = f"{self.reported_name}_{self.backend.value}"
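Defaulting the new keyword to False keeps every pre-existing constructor call working unchanged; only callers that opt in pass the flag. A simplified sketch (the class below is a stand-in, not the real pipeline):

```python
class PipelineStub:
    """Stand-in for the test pipeline; only the relevant keywords are shown."""

    def __init__(self, run_benchmark_app: bool, validate_in_backend: bool = False):
        self.run_benchmark_app = run_benchmark_app
        self.validate_in_backend = validate_in_backend


legacy = PipelineStub(run_benchmark_app=False)  # older call sites: flag stays off
opted_in = PipelineStub(run_benchmark_app=False, validate_in_backend=True)
```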
tests/post_training/pipelines/image_classification_base.py (36 additions, 10 deletions)
@@ -21,6 +21,7 @@
import nncf
from nncf.common.logging.track_progress import track
from tests.post_training.pipelines.base import DEFAULT_VAL_THREADS
from tests.post_training.pipelines.base import BackendType
from tests.post_training.pipelines.base import PTQTestPipeline


@@ -33,18 +34,15 @@ def prepare_calibration_dataset(self):

self.calibration_dataset = nncf.Dataset(loader, self.get_transform_calibration_fn())

def _validate(self):
val_dataset = datasets.ImageFolder(root=self.data_dir / "imagenet" / "val", transform=self.transform)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, num_workers=2, shuffle=False)

dataset_size = len(val_loader)

# Initialize result tensors for async inference support.
predictions = np.zeros((dataset_size))
references = -1 * np.ones((dataset_size))
def _validate_ov(
self,
val_loader: torch.utils.data.DataLoader,
predictions: np.ndarray,
references: np.ndarray,
dataset_size: int,
):

core = ov.Core()

if os.environ.get("INFERENCE_NUM_THREADS"):
# Set CPU_THREADS_NUM for OpenVINO inference
inference_num_threads = os.environ.get("INFERENCE_NUM_THREADS")
@@ -73,6 +71,34 @@ def process_result(request, userdata):
references[i] = target

infer_queue.wait_all()
return predictions, references
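The _validate_ov path above relies on OpenVINO's asynchronous inference queue, in which a callback fills the result tensors as requests complete. A standalone sketch of that pattern, with a placeholder IR path and input shape:

```python
import numpy as np
import openvino as ov

core = ov.Core()
compiled = core.compile_model("model.xml", "CPU")  # placeholder model path
predictions = np.zeros(8)


def process_result(request, userdata):
    # Runs when the request tagged with `userdata` finishes.
    predictions[userdata] = np.argmax(request.get_output_tensor().data)


infer_queue = ov.AsyncInferQueue(compiled, jobs=4)
infer_queue.set_callback(process_result)
for i in range(8):
    infer_queue.start_async(np.random.rand(1, 3, 224, 224).astype(np.float32), userdata=i)
infer_queue.wait_all()  # block until every queued request has run its callback
```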

def _validate_torch_compile(
self, val_loader: torch.utils.data.DataLoader, predictions: np.ndarray, references: np.ndarray
):
compiled_model = torch.compile(self.compressed_model, backend="openvino")
for i, (images, target) in enumerate(val_loader):
# W/A for memory leaks when using torch DataLoader and OpenVINO
pred = compiled_model(images)
pred = torch.argmax(pred, dim=1)
predictions[i] = pred.numpy()
references[i] = target.numpy()
return predictions, references

def _validate(self):
val_dataset = datasets.ImageFolder(root=self.data_dir / "imagenet" / "val", transform=self.transform)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, num_workers=2, shuffle=False)

dataset_size = len(val_loader)

# Initialize result tensors for async inference support.
predictions = np.zeros((dataset_size))
references = -1 * np.ones((dataset_size))

if self.validate_in_backend and self.backend == BackendType.FX_TORCH:
predictions, references = self._validate_torch_compile(val_loader, predictions, references)
else:
predictions, references = self._validate_ov(val_loader, predictions, references, dataset_size)

acc_top1 = accuracy_score(predictions, references)

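With --validate-in-backend set and the FX_TORCH backend selected, validation stays in PyTorch: the quantized FX model is wrapped with torch.compile using the OpenVINO backend rather than being exported to IR first. A minimal sketch, assuming torch and the openvino package (which registers the "openvino" compile backend) are installed; the model here is a placeholder for self.compressed_model:

```python
import torch
import torchvision.models as models

model = models.resnet18(weights=None).eval()  # placeholder for self.compressed_model
compiled_model = torch.compile(model, backend="openvino")

with torch.no_grad():
    images = torch.randn(1, 3, 224, 224)  # one ImageNet-shaped batch
    pred = torch.argmax(compiled_model(images), dim=1)
print(pred.numpy())
```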
tests/post_training/test_quantize_conformance.py (7 additions, 0 deletions)
@@ -75,6 +75,11 @@ def fixture_run_benchmark_app(pytestconfig):
return pytestconfig.getoption("benchmark")


@pytest.fixture(scope="session", name="validate_in_backend")
def fixture_validate_in_backend(pytestconfig):
return pytestconfig.getoption("validate_in_backend")


@pytest.fixture(scope="session", name="extra_columns")
def fixture_extra_columns(pytestconfig):
return pytestconfig.getoption("extra_columns")
@@ -262,6 +267,7 @@ def test_ptq_quantization(
run_torch_cuda_backend: bool,
subset_size: Optional[int],
run_benchmark_app: bool,
validate_in_backend: bool,
capsys: pytest.CaptureFixture,
extra_columns: bool,
memory_monitor: bool,
@@ -289,6 +295,7 @@
"data_dir": data_dir,
"no_eval": no_eval,
"run_benchmark_app": run_benchmark_app,
"validate_in_backend": validate_in_backend,
"batch_size": batch_size,
"memory_monitor": memory_monitor,
}
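End to end, enabling the option is just a matter of adding the flag to the pytest invocation; a hypothetical programmatic equivalent (other suite options, such as the data directory, elided):

```python
import pytest

# Same as: pytest tests/post_training/test_quantize_conformance.py --validate-in-backend
pytest.main([
    "tests/post_training/test_quantize_conformance.py",
    "--validate-in-backend",
])
```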
