@@ -1612,12 +1612,27 @@ def test_call_max_var_criterion_with_dataset_gptq_neg_group_size(mode):
 
 
 @pytest.mark.parametrize(
-    "params, transpose_b",
-    ((None, True), (LoraParams(adapter_rank=4, use_int8_adapters=False), False)),
+    "params, transpose_a, transpose_b",
+    (
+        (None, False, True),  # original
+        (LoraParams(adapter_rank=4, use_int8_adapters=False), False, False),  # original
+        pytest.param(
+            LoraParams(adapter_rank=4, use_int8_adapters=False),
+            True,
+            False,
+            marks=pytest.mark.skip(reason="LoRA correction does not support transpose_a=True yet"),
+        ),
+        pytest.param(
+            LoraParams(adapter_rank=8, use_int8_adapters=True),
+            True,
+            True,
+            marks=pytest.mark.skip(reason="LoRA correction does not support transpose_a=True or transpose_b=True yet"),
+        ),
+    ),
 )
-def test_lora_adapters_in_the_graph(params, transpose_b):
+def test_lora_adapters_in_the_graph(params, transpose_a, transpose_b):
     advanced_parameters = CompressionParams() if params is None else CompressionParams(lora_correction_params=params)
-    model = LMLinearModel(transpose_b=transpose_b)
+    model = LMLinearModel(transpose_a=transpose_a, transpose_b=transpose_b)
     ov_model = model.ov_model
     dataset = Dataset(np.ones(inp.shape) for inp in ov_model.inputs)