Skip to content

Commit

Permalink
torch-fx tests are added to pre-commit
Browse files Browse the repository at this point in the history
  • Loading branch information
daniil-lyakhov committed Jun 26, 2024
1 parent b0ed99a commit c818a93
Show file tree
Hide file tree
Showing 4 changed files with 71 additions and 6 deletions.
44 changes: 44 additions & 0 deletions .github/workflows/precommit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,50 @@ jobs:
token: ${{ secrets.CODECOV_TOKEN }}
name: coverage_openvino
flags: OPENVINO
torchFX:
timeout-minutes: 40
defaults:
run:
shell: bash
runs-on: ubuntu-20.04-8-cores
env:
DEBIAN_FRONTEND: noninteractive
steps:
- name: Install dependencies
run : |
sudo apt-get update
sudo apt-get --assume-yes install gcc g++ build-essential ninja-build libgl1-mesa-dev libglib2.0-0
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
lfs: true
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: 3.8.18
cache: pip
- name: Runner info
continue-on-error: true
run: |
cat /etc/*release
cat /proc/cpuinfo
- name: Install NNCF and test requirements
run: make install-torch-fx-test
- name: Run TorchFX precommit test scope
run: |
make test-torch-fx
env:
NNCF_COVERAGE: 1
NUM_WORKERS: 4
- name: Upload coverage report as artifact
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: coverage_fx_cpu
path: ./coverage.xml
- name: Upload coverage report to codecov
uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c # v4.4.1
with:
token: ${{ secrets.CODECOV_TOKEN }}
name: coverage_fx_cpu
flags: TORCH
pytorch-cpu:
timeout-minutes: 40
defaults:
Expand Down
11 changes: 11 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,17 @@ test-examples-torch:
test-models-hub-torch:
pytest tests/torch/models_hub_test --junitxml ${JUNITXML_PATH}

###############################################################################
# TorchFX backend
install-torch-fx-test:
pip install -U pip
pip install -e .
pip install -r tests/torch_fx/requirements.txt

test-torch-fx:
pytest ${COVERAGE_ARGS} tests/torch_fx/ -ra \
--junitxml ${JUNITXML_PATH}

###############################################################################
# Common part
install-common-test:
Expand Down
6 changes: 6 additions & 0 deletions tests/torch_fx/requirements.txt
Original file line number Diff line number Diff line change
@@ -1 +1,7 @@
-c ../../constraints.txt
pytest
pytest-cov
openvino
torch
torchvision
fastdownload==0.0.7
16 changes: 10 additions & 6 deletions tests/torch_fx/test_sanity.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,11 @@
BATCH_SIZE = 128


@pytest.fixture(name="tiny_imagenet_dataset", scope="module")
def tiny_imagenet_dataset_fixture():
return TinyImagenetDatasetManager(IMAGE_SIZE, BATCH_SIZE).create_data_loaders()


@dataclass
class SanitySampleCase:
model_id: str
Expand All @@ -47,7 +52,7 @@ class SanitySampleCase:
SanitySampleCase(
"resnet18",
"https://storage.openvinotoolkit.org/repositories/nncf/openvino_notebook_ckpts/302_resnet18_fp32_v1.pth",
55.23,
55.2,
51,
58,
),
Expand Down Expand Up @@ -113,13 +118,12 @@ def count_q_dq(model: torch.fx.GraphModule):


@pytest.mark.parametrize("test_case", MODELS)
def test_sanity(test_case: SanitySampleCase):
def test_sanity(test_case: SanitySampleCase, tiny_imagenet_dataset):
with disable_patching():
torch.manual_seed(42)
device = torch.device("cpu")
model = get_model(test_case.model_id, test_case.checkpoint_url, device)
_, val_dataloader, calibration_dataset = TinyImagenetDatasetManager(
IMAGE_SIZE, BATCH_SIZE
).create_data_loaders()
_, val_dataloader, calibration_dataset = tiny_imagenet_dataset

def transform_fn(data_item):
return data_item[0].to(device)
Expand All @@ -134,7 +138,7 @@ def transform_fn(data_item):
quantized_model = torch.compile(quantized_model, backend="openvino")

top1_int8 = validate(val_dataloader, quantized_model, device)
assert np.isclose(top1_int8, test_case.top1_int8_ref, atol=1e-2)
assert np.isclose(top1_int8, test_case.top1_int8_ref, atol=0.1)

num_q, num_dq = count_q_dq(quantized_model)
assert num_q == test_case.ref_num_q
Expand Down

0 comments on commit c818a93

Please sign in to comment.