diff --git a/setup.py b/setup.py
index 0ea5a7b08b..227248f929 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
 
 from setuptools import find_packages, setup
 
-VERSION = "0.15.2.dev0"
+VERSION = "0.16.0"
 
 extras = {}
 extras["quality"] = [
diff --git a/src/peft/__init__.py b/src/peft/__init__.py
index 8fb4421de2..68db70c9c8 100644
--- a/src/peft/__init__.py
+++ b/src/peft/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = "0.15.2.dev0"
+__version__ = "0.16.0"
 
 from .auto import (
     MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
diff --git a/tests/test_gpu_examples.py b/tests/test_gpu_examples.py
index 564c4e0c64..7fa9b539f3 100644
--- a/tests/test_gpu_examples.py
+++ b/tests/test_gpu_examples.py
@@ -3807,7 +3807,7 @@ def setUp(self):
         # torchao breaks with fp16 and if a previous test uses fp16, transformers will set this env var, which affects
         # subsequent tests, therefore the env var needs to be cleared explicitly
         #
-        # TODO: remove this once https://github.com/huggingface/transformers/pull/34886 is merged
+        # TODO: remove this once https://github.com/huggingface/transformers/pull/37259 is merged
         os.environ.pop("ACCELERATE_MIXED_PRECISION", None)
 
     def tearDown(self):
diff --git a/tests/testing_common.py b/tests/testing_common.py
index cf115e9511..7b0309b649 100644
--- a/tests/testing_common.py
+++ b/tests/testing_common.py
@@ -1393,9 +1393,6 @@ def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs):
     def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs):
         if not issubclass(config_cls, PromptLearningConfig):
             return pytest.skip(f"Test not applicable for {config_cls}")
-        if ("gemma" in model_id.lower()) and (config_cls == PrefixTuningConfig):
-            # TODO might be caused by the 4d causal attention mask of gemma
-            return pytest.skip("Prefix tuning + gemma is currently failing")
 
         with hub_online_once(model_id):
             model = self.transformers_class.from_pretrained(model_id)
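
Not part of the diff above: a minimal sketch of how the version bump could be sanity-checked at runtime, assuming the freshly built 0.16.0 package is installed in the current environment.

# Illustrative only; assumes peft 0.16.0 from this release is installed.
import peft

assert peft.__version__ == "0.16.0", f"unexpected peft version: {peft.__version__}"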