2 parents 5cc91b1 + a96d2f0 commit 3a6911f
bitsandbytes/nn/modules.py
@@ -273,6 +273,7 @@ def from_prequantized(
         quantized_stats: Dict[str, Any],
         requires_grad: bool = False,
         device="cuda",
+        module: Optional["Linear4bit"] = None,
         **kwargs,
     ) -> "Params4bit":
         self = torch.Tensor._make_subclass(cls, data.to(device))
@@ -284,6 +285,10 @@ def from_prequantized(
         self.bnb_quantized = True

         self.quant_storage = data.dtype
+        self.module = module
+
+        if self.module is not None:
+            self.module.quant_state = self.quant_state

         return self
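For context, here is a minimal sketch of how the new module argument might be used when restoring a prequantized 4-bit weight. It is not part of this commit: it assumes bitsandbytes with CUDA support, illustrative layer sizes and variable names, and that the quantized stats dict comes from QuantState.as_dict(packed=True).

# Sketch (assumptions, not from this commit): bitsandbytes with CUDA available,
# illustrative sizes/names, stats serialized via QuantState.as_dict(packed=True).
import torch
from bitsandbytes.nn import Linear4bit, Params4bit

# Quantize a layer once (moving it to CUDA triggers 4-bit quantization),
# then grab its packed weight data and serialized quantization stats.
layer = Linear4bit(64, 64, bias=False).cuda()
packed_weight = layer.weight.data
quantized_stats = layer.weight.quant_state.as_dict(packed=True)

# Restore the weight elsewhere. Passing module=restored is what the added
# lines act on: from_prequantized mirrors the rebuilt quant_state onto the
# owning Linear4bit, so the layer has its quant_state without re-quantizing.
restored = Linear4bit(64, 64, bias=False)
restored.weight = Params4bit.from_prequantized(
    data=packed_weight,
    quantized_stats=quantized_stats,
    device="cuda",
    module=restored,
)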