medusa_lora.py
"""Combined Medusa + LoRA adapter support for LoRAX: pairs a LoRA adapter's
weights with a Medusa speculative-decoding head so the two can be mapped and
batched together."""

from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Type

import torch

from lorax_server.adapters.config import AdapterConfig, ModuleMap
from lorax_server.adapters.lora import BatchLoraWeights, LoraConfig, LoraWeights
from lorax_server.adapters.medusa import BatchMedusaWeights, MedusaConfig, MedusaWeights
from lorax_server.adapters.weights import AdapterWeights, BatchAdapterWeights

if TYPE_CHECKING:
    from lorax_server.models.model import Model

EMPTY_TENSOR = torch.tensor([])

@dataclass
class MedusaLoraModuleMap:
    """Module maps for the LoRA weights and the Medusa head, kept side by side."""

    lora_module_map: ModuleMap
    medusa_module_map: ModuleMap

@dataclass
class MedusaLoraConfig(AdapterConfig):
    """Adapter config that wraps a LoRA config and a Medusa config together."""

    lora_config: LoraConfig
    medusa_config: MedusaConfig

    def map_weights_for_model(
        self,
        adapter_weights: Dict,
        weight_names: Tuple[str],
        embedding_weight_name: str,
    ) -> Tuple[MedusaLoraModuleMap, Set[str]]:
        # Map the LoRA weights first; the Medusa weights are then mapped
        # against the weight names returned by the LoRA pass.
        lora_module_map, weight_names = self.lora_config.map_weights_for_model(
            adapter_weights, weight_names, embedding_weight_name
        )
        medusa_module_map, _ = self.medusa_config.map_weights_for_model(
            adapter_weights, weight_names, embedding_weight_name
        )
        return MedusaLoraModuleMap(lora_module_map, medusa_module_map), weight_names
    def load_batched_adapter_weights(
        self,
        model: "Model",
        module_map: MedusaLoraModuleMap,
        layer_type: str,
        unused_weight_names: Set[str],
        dynamic: bool,
    ) -> Optional[AdapterWeights]:
        # Load each half through its own config, then bundle the results.
        lora_weights = self.lora_config.load_batched_adapter_weights(
            model, module_map.lora_module_map, layer_type, unused_weight_names, dynamic
        )
        medusa_weights = self.medusa_config.load_batched_adapter_weights(
            model, module_map.medusa_module_map, layer_type, unused_weight_names, dynamic
        )
        return MedusaLoraWeights.load(
            lora_weights,
            medusa_weights,
        )
    @classmethod
    def load(cls, adapter_id: str, config: dict, api_token: str) -> "MedusaLoraConfig":
        lora_config = LoraConfig.load(adapter_id, api_token)
        medusa_config = MedusaConfig.load(config)
        return cls(
            base_model_name_or_path=lora_config.base_model_name_or_path,
            lora_config=lora_config,
            medusa_config=medusa_config,
        )

class MedusaLoraWeights(AdapterWeights):
    """Holds the loaded LoRA weights and Medusa weights for a single adapter."""

    def __init__(
        self,
        lora_weights: LoraWeights,
        medusa_weights: MedusaWeights,
    ):
        self.lora_weights = lora_weights
        self.medusa_weights = medusa_weights

    @classmethod
    def get_batch_types(cls) -> List[Type[BatchAdapterWeights]]:
        # Both batch types participate when these adapters are batched together.
        return [BatchLoraWeights, BatchMedusaWeights]

    @property
    def speculative_tokens(self) -> int:
        # The speculative decoding length is determined by the Medusa head alone.
        return self.medusa_weights.speculative_tokens

    @classmethod
    def load(
        cls,
        lora_weights: LoraWeights,
        medusa_weights: MedusaWeights,
    ) -> Optional[AdapterWeights]:
        return MedusaLoraWeights(
            lora_weights,
            medusa_weights,
        )
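
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal illustration of
# how the classes above compose, assuming the caller sits inside the LoRAX
# adapter-loading pipeline. The adapter id, config dict, layer type, and
# token below are hypothetical placeholders, not values taken from this file.
#
#   config = MedusaLoraConfig.load(
#       adapter_id="org/medusa-lora-adapter",  # hypothetical adapter id
#       config={...},                          # Medusa config dict from the adapter repo
#       api_token="<token>",                   # hypothetical access token
#   )
#   module_map, remaining = config.map_weights_for_model(
#       adapter_weights, weight_names, embedding_weight_name
#   )
#   weights = config.load_batched_adapter_weights(
#       model, module_map, "lm_head", remaining, dynamic=True  # "lm_head" is illustrative
#   )
#   num_spec = weights.speculative_tokens  # comes from the Medusa head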