Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/test-docker-build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,9 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
id: set-matrix
env:
ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
CHANGED_FILES: "${{ steps.changed-files.outputs.all_changed_files }}"
run: |
echo "matrix=${ALL_CHANGED_FILES}" >> $GITHUB_OUTPUT
echo "matrix=$(echo ${CHANGED_FILES} | sed -e 's/\\\"/\"/g')" >> $GITHUB_OUTPUT
build_modified_files:
needs: get_changed_files
name: Build Docker images on modified files
Expand Down
5 changes: 4 additions & 1 deletion docker/peft-gpu/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,10 @@ RUN apt-get update && \
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]

RUN conda run -n peft pip install --no-cache-dir bitsandbytes optimum gptqmodel
RUN conda run -n peft pip install --no-cache-dir bitsandbytes optimum

# GPTQModel's build can't find torch when pip uses an isolated build env, so install with --no-build-isolation
RUN conda run -n peft pip install --no-build-isolation gptqmodel

RUN \
# Add eetq for quantization testing; needs to run without build isolation since the setup
Expand Down
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -52,5 +52,7 @@ markers = [

filterwarnings = [
"error::DeprecationWarning:transformers",
# in sync with tests/conftest.py regarding BPE deprecation
"ignore:.*BPE.__init__ will not create from files anymore.*:DeprecationWarning:transformers",
"error::FutureWarning:transformers",
]
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
]
extras["docs_specific"] = [
"black", # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
"requests", # doc-builder has an implicit dependency on requests (setup.py doesn't mention it, pyproject does)
"hf-doc-builder",
]
extras["dev"] = extras["quality"] + extras["docs_specific"]
Expand Down
6 changes: 0 additions & 6 deletions src/peft/tuners/lora/gptq.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,11 +131,5 @@ def dispatch_gptq(
if isinstance(target_base_layer, BaseQuantLinear):
new_module = GPTQLoraLinear(target, adapter_name, config=config, **kwargs)
target.qweight = target_base_layer.qweight
else:
quant_linear = get_auto_gptq_quant_linear(cfg)

if quant_linear is not None and isinstance(target_base_layer, quant_linear):
new_module = GPTQLoraLinear(target, adapter_name, config=config, **kwargs)
target.qweight = target_base_layer.qweight

return new_module
Loading