
Commit b8790ce

Remove deprecated components for 0.6.0 (#2293)
1 parent d7afc40 commit b8790ce

2 files changed: +2 −22 lines

torchtune/modules/peft/_utils.py (−15 lines)

```diff
@@ -9,7 +9,6 @@
 
 import torch
 from torch import nn
-from torchtune.utils._logging import deprecated
 
 # Modules from MultiHeadAttention that LoRA can be applied to
 LORA_ATTN_MODULES = Literal["q_proj", "k_proj", "v_proj", "output_proj"]
@@ -313,17 +312,3 @@ def validate_missing_and_unexpected_for_lora(
             raise AssertionError(f"Missing LoRA key {k} from adapter state dict")
     if lora_unexpected:
         raise AssertionError("Unexpected key loading adapter")
-
-
-@deprecated(
-    msg="load_dora_magnitudes will be deprecated in 0.6.0. Please use DoRALinear.initialize_dora_magnitude instead."
-)
-def load_dora_magnitudes(model: nn.Module) -> None:
-    """
-    For DoRA magnitude we use setattr to move from meta device
-    """
-    dora_parents = {
-        n: p for n, p in model.named_modules() if hasattr(p, "adapter_params")
-    }
-    sd = {f"{n}.magnitude": p.magnitude for n, p in dora_parents.items()}
-    model.load_state_dict(sd, strict=False, assign=True)
```
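The removed `load_dora_magnitudes` helper collected every module exposing `adapter_params` and loaded all DoRA magnitudes through a single `load_state_dict(..., assign=True)` call; its deprecation message points callers at `DoRALinear.initialize_dora_magnitude` instead. A minimal migration sketch, assuming the model's DoRA layers are `torchtune.modules.peft.DoRALinear` instances; the wrapper function name below is hypothetical, and the no-argument call signature of `initialize_dora_magnitude` is an assumption based on the deprecation message:

```python
from torch import nn

from torchtune.modules.peft import DoRALinear


def initialize_all_dora_magnitudes(model: nn.Module) -> None:
    # Hypothetical stand-in for the removed load_dora_magnitudes: instead of
    # assembling a "<name>.magnitude" state dict for the whole model, ask each
    # DoRA layer to initialize its own magnitude (per the deprecation message).
    for module in model.modules():
        if isinstance(module, DoRALinear):
            module.initialize_dora_magnitude()
```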

torchtune/training/quantization.py (+2 −7 lines)

```diff
@@ -5,7 +5,6 @@
 # LICENSE file in the root directory of this source tree.
 
 from typing import Callable, Optional
-from warnings import warn
 
 from torch import nn
 from torchtune.modules.peft.lora import LoRALinear, QATLoRALinear
@@ -144,6 +143,7 @@ class Int4WeightOnlyQATQuantizerModuleSwap(Int4WeightOnlyQATQuantizer):
     "4w-qat-module-swap"
 ] = enable_4w_fake_quant_module_swap
 
+
 # int8 dynamic activations + int4 weight
 class Int8DynActInt4WeightQATQuantizerModuleSwap(Int8DynActInt4WeightQATQuantizer):
     pass
@@ -179,12 +179,7 @@ def get_quantizer_mode(quantizer: Optional[Callable]) -> Optional[str]:
     Returns:
         Optional[str]: The quantization mode.
     """
-    mode = _quantizer_to_mode.get(type(quantizer), None)
-    if mode is not None and "module-swap" in mode:
-        warn(
-            "*QuantizerModuleSwap is deprecated. Please use the version without 'ModuleSwap' instead"
-        )
-    return mode
+    return _quantizer_to_mode.get(type(quantizer), None)
 
 
 def _get_disable_fake_quant(quantizer_mode: str) -> Callable:
```
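With the ModuleSwap deprecation warning gone, `get_quantizer_mode` reduces to a plain registry lookup: it returns the mode string registered for the quantizer's type in `_quantizer_to_mode`, or `None` for unregistered types. A short usage sketch, assuming `Int8DynActInt4WeightQATQuantizer` is reachable through `torchtune.training.quantization` (the class is referenced in this file) and that its registered mode string is `"8da4w-qat"` (an assumption based on torchtune's quantizer naming convention):

```python
from torchtune.training.quantization import (
    Int8DynActInt4WeightQATQuantizer,
    get_quantizer_mode,
)

quantizer = Int8DynActInt4WeightQATQuantizer()
print(get_quantizer_mode(quantizer))  # assumed registry entry: "8da4w-qat"

# Anything not in the registry (including None) falls through to the
# dict.get default and returns None, with no deprecation warning emitted.
print(get_quantizer_mode(None))  # None
```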
