2 files changed, +2 -22 lines changed
First file:

@@ -9,7 +9,6 @@
 
 import torch
 from torch import nn
-from torchtune.utils._logging import deprecated
 
 # Modules from MultiHeadAttention that LoRA can be applied to
 LORA_ATTN_MODULES = Literal["q_proj", "k_proj", "v_proj", "output_proj"]
@@ -313,17 +312,3 @@ def validate_missing_and_unexpected_for_lora(
         raise AssertionError(f"Missing LoRA key {k} from adapter state dict")
     if lora_unexpected:
         raise AssertionError("Unexpected key loading adapter")
-
-
-@deprecated(
-    msg="load_dora_magnitudes will be deprecated in 0.6.0. Please use DoRALinear.initialize_dora_magnitude instead."
-)
-def load_dora_magnitudes(model: nn.Module) -> None:
-    """
-    For DoRA magnitude we use setattr to move from meta device
-    """
-    dora_parents = {
-        n: p for n, p in model.named_modules() if hasattr(p, "adapter_params")
-    }
-    sd = {f"{n}.magnitude": p.magnitude for n, p in dora_parents.items()}
-    model.load_state_dict(sd, strict=False, assign=True)
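
Note: the deprecation message above points callers at DoRALinear.initialize_dora_magnitude. A rough migration sketch, assuming (as the message implies) that each DoRA layer exposes an argument-free initialize_dora_magnitude() method; the import path below is an assumption, not shown in this diff:

from torch import nn

from torchtune.modules.peft import DoRALinear  # assumed import path


def init_all_dora_magnitudes(model: nn.Module) -> None:
    # Hypothetical replacement for the removed load_dora_magnitudes helper:
    # let each DoRA layer compute its own magnitude in place, rather than
    # loading a hand-built state dict with assign=True.
    for module in model.modules():
        if isinstance(module, DoRALinear):
            module.initialize_dora_magnitude()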
Second file:

@@ -5,7 +5,6 @@
 # LICENSE file in the root directory of this source tree.
 
 from typing import Callable, Optional
-from warnings import warn
 
 from torch import nn
 from torchtune.modules.peft.lora import LoRALinear, QATLoRALinear
@@ -144,6 +143,7 @@ class Int4WeightOnlyQATQuantizerModuleSwap(Int4WeightOnlyQATQuantizer):
     "4w-qat-module-swap"
 ] = enable_4w_fake_quant_module_swap
 
+
 # int8 dynamic activations + int4 weight
 class Int8DynActInt4WeightQATQuantizerModuleSwap(Int8DynActInt4WeightQATQuantizer):
     pass
@@ -179,12 +179,7 @@ def get_quantizer_mode(quantizer: Optional[Callable]) -> Optional[str]:
     Returns:
         Optional[str]: The quantization mode.
     """
-    mode = _quantizer_to_mode.get(type(quantizer), None)
-    if mode is not None and "module-swap" in mode:
-        warn(
-            "*QuantizerModuleSwap is deprecated. Please use the version without 'ModuleSwap' instead"
-        )
-    return mode
+    return _quantizer_to_mode.get(type(quantizer), None)
 
 
 def _get_disable_fake_quant(quantizer_mode: str) -> Callable:
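
With the warning gone, get_quantizer_mode is a plain registry lookup. A small usage sketch; the module path and the exact mode string for the module-swap class are assumptions based on the names visible in this diff:

from torchtune.training.quantization import (  # assumed module path
    Int4WeightOnlyQATQuantizerModuleSwap,
    get_quantizer_mode,
)

quantizer = Int4WeightOnlyQATQuantizerModuleSwap()
# Registry lookup: returns the registered mode string (apparently
# "4w-qat-module-swap" for this class, per the diff above) without
# emitting the old deprecation warning; unrecognized quantizers map to None.
print(get_quantizer_mode(quantizer))
print(get_quantizer_mode(None))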