Prepare for PEFT release of v0.14.0 (#2258)
- Bump versions
- Remove deprecated convert_pissa_to_lora argument
- Remove a pytest skip for older transformers versions
- Adjust some comments, docstrings
BenjaminBossan authored Dec 6, 2024
1 parent 860f783 commit de88c70
Showing 8 changed files with 12 additions and 60 deletions.
2 changes: 1 addition & 1 deletion examples/pissa_finetuning/README.md
@@ -71,7 +71,7 @@ The main advantage of PiSSA is concentrated during the training phase. For a tra
peft_model.save_pretrained(output_dir)
# Given the matrices $A_0$ and $B_0$, initialized by PiSSA and untrained, and the trained matrices $A$ and $B$,
# we can convert these to LoRA by setting $\Delta W = A \times B - A_0 \times B_0 = [A \mid A_0] \times [B \mid -B_0]^T = A'B'$.
-peft_model.save_pretrained(output_dir, convert_pissa_to_lora="pissa_init")
+peft_model.save_pretrained(output_dir, path_initial_model_for_weight_conversion="pissa_init")

```
This conversion enables the loading of LoRA on top of a standard base model:
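The diff is truncated here; as a hedged sketch of that loading step, assuming the standard `PeftModel.from_pretrained` API (the model id and directory name below are illustrative, not taken from the diff):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Illustrative names; substitute the base model and output directory actually used.
base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
output_dir = "pissa-lora-converted"  # directory written by save_pretrained(...) above

# After the weight conversion, the saved adapter is an ordinary LoRA checkpoint,
# so it loads onto the unmodified base model.
peft_model = PeftModel.from_pretrained(base_model, output_dir)
```

Because the converted factors are $A' = [A \mid A_0]$ and $B' = [B \mid -B_0]^T$, the resulting LoRA adapter has twice the original rank.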
2 changes: 1 addition & 1 deletion examples/pissa_finetuning/pissa_finetuning.py
@@ -136,7 +136,7 @@ class ScriptArguments(SFTConfig):
if script_args.convert_pissa_to_lora:
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "pissa_lora"),
-convert_pissa_to_lora=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
+path_initial_model_for_weight_conversion=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
)
else:
peft_model.save_pretrained(
2 changes: 1 addition & 1 deletion setup.py
@@ -15,7 +15,7 @@
from setuptools import find_packages, setup


VERSION = "0.13.3.dev0"
VERSION = "0.14.0"

extras = {}
extras["quality"] = [
2 changes: 1 addition & 1 deletion src/peft/__init__.py
@@ -17,7 +17,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "0.13.3.dev0"
__version__ = "0.14.0"

from .auto import (
AutoPeftModel,
10 changes: 0 additions & 10 deletions src/peft/peft_model.py
@@ -229,7 +229,6 @@ def save_pretrained(
selected_adapters: Optional[list[str]] = None,
save_embedding_layers: Union[str, bool] = "auto",
is_main_process: bool = True,
-convert_pissa_to_lora: Optional[str] = None,
path_initial_model_for_weight_conversion: Optional[str] = None,
**kwargs: Any,
) -> None:
@@ -253,8 +252,6 @@ def save_pretrained(
is_main_process (`bool`, *optional*):
Whether the process calling this is the main process or not. Will default to `True`. Will not save the
checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
-convert_pissa_to_lora (`str, *optional*`):
-Deprecated. Use `path_initial_model_for_weight_conversion` instead.
path_initial_model_for_weight_conversion (`str, *optional*`):
The path to the initialized adapter, which is obtained after initializing the model with PiSSA or OLoRA
and before performing any training. When `path_initial_model_for_weight_conversion` is not None, the
@@ -281,13 +278,6 @@
f"You passed an invalid `selected_adapters` arguments, current supported adapter names are"
f" {list(self.peft_config.keys())} - got {selected_adapters}."
)
-# TODO: remove deprecated parameter in PEFT v0.14.0
-if convert_pissa_to_lora is not None:
-warnings.warn(
-"`convert_pissa_to_lora` is deprecated and will be removed in a future version. "
-"Use `path_initial_model_for_weight_conversion` instead."
-)
-path_initial_model_for_weight_conversion = convert_pissa_to_lora

def save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs):
if peft_config.use_rslora and (peft_config.rank_pattern or peft_config.alpha_pattern):
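For downstream code that still passes the removed keyword, a migration sketch (the `peft_model`, `output_dir`, and `init_path` names are illustrative fragments, not a runnable script):

```python
# Before (deprecated keyword, removed in this release):
# peft_model.save_pretrained(output_dir, convert_pissa_to_lora=init_path)

# After: point the replacement argument at the untrained PiSSA/OLoRA
# initialization that was saved before training.
peft_model.save_pretrained(
    output_dir,
    path_initial_model_for_weight_conversion=init_path,
)
```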
8 changes: 7 additions & 1 deletion tests/test_custom_models.py
@@ -912,7 +912,13 @@ def from_pretrained(cls, model_id, torch_dtype=None):


class PeftCustomModelTester(unittest.TestCase, PeftCommonTester):
"""TODO"""
"""
Implements the tests for custom models.
Most tests should just call the parent class, e.g. test_save_pretrained calls self._test_save_pretrained. Override
this if custom models don't work with the parent test method.
"""

transformers_class = MockTransformerWrapper

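As a rough, generic illustration of the delegation pattern the new docstring describes (plain `unittest`, with names invented for the sketch and not part of the PEFT test suite):

```python
import unittest


class CommonTesterMixin:
    """Stand-in for a shared tester base: common checks live in _test_* helpers."""

    def _test_save_pretrained(self):
        # The real helper would save, reload, and compare model outputs.
        self.assertTrue(True)


class ExampleCustomModelTester(unittest.TestCase, CommonTesterMixin):
    def test_save_pretrained(self):
        # Most tests just delegate to the shared helper; a test is overridden
        # only when the generic helper does not fit the custom model.
        self._test_save_pretrained()


if __name__ == "__main__":
    unittest.main()
```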
38 changes: 0 additions & 38 deletions tests/test_initialization.py
@@ -583,44 +583,6 @@ def test_pissa_alpha_pattern_and_rslora_raises(self, tmp_path):
tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)

-# TODO: remove test for deprecated arg in PEFT v0.14.0
-def test_lora_pissa_conversion_same_output_after_loading_with_deprecated_arg(self, data, tmp_path):
-model = self.get_model()
-config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
-peft_model = get_peft_model(deepcopy(model), config)
-peft_model.peft_config["default"].init_lora_weights = True
-peft_model.save_pretrained(tmp_path / "init-model")
-peft_model.peft_config["default"].init_lora_weights = "pissa"
-
-tol = 1e-06
-peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
-output_pissa = peft_model(data)[0]
-
-peft_model.save_pretrained(tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model")
-model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
-output_converted = model_converted(data)[0]
-
-assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
-assert model_converted.peft_config["default"].r == 16
-assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
-assert torch.allclose(
-model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
-)
-
-# TODO: remove test for deprecated warning in PEFT v0.14.0
-def test_lora_pissa_conversion_deprecated_warning(self, data, tmp_path):
-model = self.get_model()
-config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
-peft_model = get_peft_model(deepcopy(model), config)
-peft_model.peft_config["default"].init_lora_weights = True
-peft_model.save_pretrained(tmp_path / "init-model")
-warning_message = "`convert_pissa_to_lora` is deprecated and will be removed in a future version. Use `path_initial_model_for_weight_conversion` instead."
-# Test the warning
-with pytest.warns(UserWarning, match=warning_message):
-peft_model.save_pretrained(
-tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model"
-)

def test_olora_conversion_same_output_after_loading(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
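The deleted test asserted that the converted adapter ends up with `r == 16`, double the original `r=8`; that follows from the concatenation identity quoted in the README hunk above. A quick numerical check of the identity (NumPy and the dimensions are used purely for illustration):

```python
import numpy as np

rng = np.random.default_rng(0)
m, n, r = 6, 5, 8  # r=8 as in the deleted test

A, B = rng.standard_normal((m, r)), rng.standard_normal((r, n))
A0, B0 = rng.standard_normal((m, r)), rng.standard_normal((r, n))

delta_w = A @ B - A0 @ B0                    # trained minus untrained PiSSA factors
A_prime = np.concatenate([A, A0], axis=1)    # (m, 2r): rank doubles to 16
B_prime = np.concatenate([B, -B0], axis=0)   # (2r, n)

assert np.allclose(delta_w, A_prime @ B_prime)
```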
8 changes: 1 addition & 7 deletions tests/test_xlora.py
@@ -15,10 +15,8 @@
import os

import huggingface_hub
-import packaging
import pytest
import torch
-import transformers
from safetensors.torch import load_file
from transformers import AutoModelForCausalLM, AutoTokenizer

@@ -27,9 +25,6 @@
from peft.utils import infer_device


-uses_transformers_4_45 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.45.0")


class TestXlora:
torch_device = infer_device()

@@ -133,8 +128,7 @@ def test_functional(self, tokenizer, model):
)
assert torch.isfinite(outputs[: inputs.shape[1] :]).all()

-# TODO: remove the skip when 4.45 is released!
-@pytest.mark.skipif(not uses_transformers_4_45, reason="Requires transformers >= 4.45")
+# TODO: fix the xfailing test
+@pytest.mark.xfail
def test_scalings_logging_methods(self, tokenizer, model):
model.enable_scalings_logging()
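The version-gated skip is replaced by an unconditional `xfail`; a small generic illustration of the difference between the two markers (not taken from the PEFT suite):

```python
import pytest


@pytest.mark.skipif(True, reason="a skipped test is never executed at all")
def test_skipped():
    raise AssertionError("never reached")


@pytest.mark.xfail
def test_known_failure():
    # An xfail test still runs; its failure is reported as XFAIL rather than
    # failing the suite, and an unexpected pass is reported as XPASS.
    raise AssertionError("known issue being tracked")
```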
