diff --git a/.github/workflows/test-compilation.yml b/.github/workflows/test-compilation.yml
new file mode 100644
index 00000000..1ed474a4
--- /dev/null
+++ b/.github/workflows/test-compilation.yml
@@ -0,0 +1,42 @@
+name: Investigate Linux compilation issues
+
+on:
+  push:
+    branches:
+      - main
+    paths:
+      - "optimum/quanto/**"
+      - "examples/**"
+      - "pyproject.toml"
+  pull_request:
+    types: [assigned, opened, synchronize, reopened]
+    paths:
+      - "optimum/quanto/**"
+      - "examples/**"
+      - "pyproject.toml"
+
+jobs:
+  investigate:
+    runs-on: [self-hosted, single-gpu, nvidia-gpu, a10, ci]
+    strategy:
+      fail-fast: false
+    container:
+      image: pytorch/pytorch:2.3.1-cuda12.1-cudnn8-devel
+      options: --gpus 0
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Check CUDA installation
+        run: |
+          nvcc -V
+
+      - name: Build and install packages
+        run: |
+          pip install --upgrade pip
+          pip install .[examples]
+
+      # Run examples
+      - name: Run Dummy example
+        run: |
+          pip show -f optimum-quanto
+          python examples/check_extension_compiled.py
diff --git a/examples/check_extension_compiled.py b/examples/check_extension_compiled.py
new file mode 100644
index 00000000..cbe5fb92
--- /dev/null
+++ b/examples/check_extension_compiled.py
@@ -0,0 +1,9 @@
+import torch
+from optimum.quanto.library.ext.cpp import ext as cpp_ext
+from optimum.quanto.library.ext.cuda import ext as cuda_ext
+
+assert cpp_ext.lib is not None
+print("CPP extension is available")
+if torch.cuda.is_available():
+    assert cuda_ext.lib is not None
+    print("CUDA extension is available")
diff --git a/optimum/quanto/library/ext/extension.py b/optimum/quanto/library/ext/extension.py
index 0eccc6a4..b3cea1c9 100644
--- a/optimum/quanto/library/ext/extension.py
+++ b/optimum/quanto/library/ext/extension.py
@@ -45,6 +45,7 @@ def lib(self):
                 extra_cflags=self.extra_cflags,
                 extra_cuda_cflags=self.extra_cuda_cflags,
                 build_directory=self.build_directory,
+                verbose=True,
             )
             if not os.path.exists(version_file):
                 with open(version_file, "w") as f: