New release (0.2.7) + Fix build (#647)
casper-hansen authored Nov 16, 2024
1 parent a28c747 commit 3b0ae79
Showing 4 changed files with 9 additions and 66 deletions.
36 changes: 5 additions & 31 deletions .github/workflows/build.yaml
@@ -32,22 +32,18 @@ jobs:
const script = require('.github/workflows/scripts/github_create_release.js')
await script(github, context, core)
build_cuda_wheels:
name: Build AWQ with CUDA
build_wheels:
name: Build AWQ
runs-on: ${{ matrix.os }}
needs: release

strategy:
matrix:
os: [ubuntu-20.04, windows-latest]
pyver: ["3.8", "3.9", "3.10", "3.11"]
cuda: ["12.1.1"]
pyver: ["3.9", "3.10", "3.11", "3.12"]
defaults:
run:
shell: pwsh
env:
PYPI_CUDA_VERSION: "12.1.1"
CUDA_VERSION: ${{ matrix.cuda }}

steps:
- name: Free Disk Space
@@ -81,38 +77,16 @@ jobs:

- name: Install Dependencies
run: |
# Install CUDA toolkit
mamba install -y 'cuda' -c "nvidia/label/cuda-${env:CUDA_VERSION}"
# Env variables
$env:CUDA_PATH = $env:CONDA_PREFIX
$env:CUDA_HOME = $env:CONDA_PREFIX
# Install torch
$cudaVersion = $env:CUDA_VERSION.Replace('.', '')
$cudaVersionPytorch = $cudaVersion.Substring(0, $cudaVersion.Length - 1)
$pytorchVersion = "torch==2.3.1"
python -m pip install --upgrade --no-cache-dir $pytorchVersion+cu$cudaVersionPytorch --index-url https://download.pytorch.org/whl/cu$cudaVersionPytorch
python -m pip install build setuptools wheel ninja requests
python -m pip install --upgrade --no-cache-dir torch==2.5.1
python -m pip install build setuptools wheel
# Print version information
python --version
python -c "import torch; print('PyTorch:', torch.__version__)"
python -c "import torch; print('CUDA:', torch.version.cuda)"
python -c "import os; print('CUDA_HOME:', os.getenv('CUDA_HOME', None))"
python -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)"
- name: Build Wheel
run: |
$env:CUDA_PATH = $env:CONDA_PREFIX
$env:CUDA_HOME = $env:CONDA_PREFIX
# Only add +cu118 to wheel if not releasing on PyPi
if ( $env:CUDA_VERSION -eq $env:PYPI_CUDA_VERSION ){
$env:PYPI_BUILD = 1
}
$env:PYPI_FORCE_TAGS = 1
python setup.py sdist bdist_wheel
- name: Upload Assets
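With the CUDA toolkit install dropped, the dependency step now pins torch==2.5.1 and prints basic version information. As a rough local equivalent of that check (a sketch for illustration, not part of the commit), one might run:

import sys
import torch  # expected to be the pinned 2.5.1 build

# Mirror the workflow's version printout locally (illustrative only).
print("Python:", sys.version.split()[0])
print("PyTorch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())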
2 changes: 1 addition & 1 deletion awq/__init__.py
@@ -1,2 +1,2 @@
__version__ = "0.2.6"
__version__ = "0.2.7"
from awq.models.auto import AutoAWQForCausalLM
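Downstream code reads the version straight from awq/__init__.py; a minimal post-install check (illustrative, assuming the 0.2.7 wheel is installed) could be:

import awq

# Confirm the installed package reports the bumped version (illustrative check).
assert awq.__version__ == "0.2.7", awq.__version__
print("AutoAWQ version:", awq.__version__)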
2 changes: 1 addition & 1 deletion scripts/download_wheels.sh
@@ -1,7 +1,7 @@
#!/bin/bash

# Set variables
AWQ_VERSION="0.2.6"
AWQ_VERSION="0.2.7"
RELEASE_URL="https://api.github.com/repos/casper-hansen/AutoAWQ/releases/tags/v${AWQ_VERSION}"

# Create a directory to download the wheels
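The rest of download_wheels.sh is unchanged. For illustration only, a Python sketch of querying that release URL for wheel assets via the standard GitHub releases API (an assumption about what such a script does, not the script itself) might look like:

import requests

AWQ_VERSION = "0.2.7"
RELEASE_URL = (
    "https://api.github.com/repos/casper-hansen/AutoAWQ/"
    f"releases/tags/v{AWQ_VERSION}"
)

# List the wheel assets attached to the v0.2.7 release (sketch, not the real script).
release = requests.get(RELEASE_URL, timeout=30).json()
for asset in release.get("assets", []):
    if asset["name"].endswith(".whl"):
        print(asset["browser_download_url"])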
35 changes: 2 additions & 33 deletions setup.py
@@ -2,10 +2,8 @@
import torch
from pathlib import Path
from setuptools import setup, find_packages
from torch.utils.cpp_extension import CUDAExtension

AUTOAWQ_VERSION = "0.2.6"
PYPI_BUILD = os.getenv("PYPI_BUILD", "0") == "1"
AUTOAWQ_VERSION = "0.2.7"
INSTALL_KERNELS = os.getenv("INSTALL_KERNELS", "0") == "1"
IS_CPU_ONLY = not torch.backends.mps.is_available() and not torch.cuda.is_available()
TORCH_VERSION = str(os.getenv("TORCH_VERSION", None) or torch.__version__).split('+', maxsplit=1)[0]
@@ -14,23 +12,6 @@
if CUDA_VERSION:
CUDA_VERSION = "".join(CUDA_VERSION.split("."))[:3]

ROCM_VERSION = os.getenv("ROCM_VERSION", None) or torch.version.hip
if ROCM_VERSION:
ROCM_VERSION_LEN = min(len(ROCM_VERSION.split(".")), 3)
ROCM_VERSION = "".join(ROCM_VERSION.split("."))[:ROCM_VERSION_LEN]

if not PYPI_BUILD:
if IS_CPU_ONLY:
AUTOAWQ_VERSION += "+cpu"
elif CUDA_VERSION:
AUTOAWQ_VERSION += f"+cu{CUDA_VERSION}"
elif ROCM_VERSION:
AUTOAWQ_VERSION += f"+rocm{ROCM_VERSION}"
else:
raise RuntimeError(
"Your system must have either Nvidia or AMD GPU to build this package."
)

common_setup_kwargs = {
"version": AUTOAWQ_VERSION,
"name": "autoawq",
@@ -50,10 +31,10 @@
"Environment :: GPU :: NVIDIA CUDA :: 12",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: C++",
],
}
@@ -82,18 +63,6 @@
elif IS_CPU_ONLY:
requirements.append("intel-extension-for-pytorch>=2.4.0")

force_extension = os.getenv("PYPI_FORCE_TAGS", "0")
if force_extension == "1":
# NOTE: We create an empty CUDAExtension because torch helps us with
# creating the right boilerplate to enable correct targeting of
# the autoawq-kernels package
common_setup_kwargs["ext_modules"] = [
CUDAExtension(
name="test_kernel",
sources=[],
)
]

setup(
packages=find_packages(),
install_requires=requirements,
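For reference, the CUDA_VERSION normalization kept in setup.py collapses a dotted toolkit version to its first three digits; a small worked example (input values are illustrative):

# Worked example of the normalization kept in setup.py:
# "".join(CUDA_VERSION.split("."))[:3]
def normalize_cuda_version(cuda_version: str) -> str:
    return "".join(cuda_version.split("."))[:3]

print(normalize_cuda_version("12.1.1"))  # -> "121"
print(normalize_cuda_version("11.8"))    # -> "118"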
