  1 |   1 | #!/usr/bin/env python3
  2 |   2 |
  3 |     | -from typing import cast, Dict, Optional, Union
    |   3 | +from typing import Optional, Union
  4 |   4 |
  5 |   5 | import torch
  6 |   6 | from captum._utils.models.linear_model.model import (
 10 |  10 | )
 11 |  11 | from tests.helpers import BaseTest
 12 |  12 | from tests.helpers.basic import assertTensorAlmostEqual
    |  13 | +from tests.utils.evaluate_linear_model import evaluate
 13 |  14 | from torch import Tensor
 14 |  15 |
 15 |  16 |
 16 |     | -def _evaluate(test_data, classifier) -> Dict[str, Tensor]:
 17 |     | -    classifier.eval()
 18 |     | -
 19 |     | -    l1_loss = 0.0
 20 |     | -    l2_loss = 0.0
 21 |     | -    n = 0
 22 |     | -    l2_losses = []
 23 |     | -    with torch.no_grad():
 24 |     | -        for data in test_data:
 25 |     | -            if len(data) == 2:
 26 |     | -                x, y = data
 27 |     | -                w = None
 28 |     | -            else:
 29 |     | -                x, y, w = data
 30 |     | -
 31 |     | -            out = classifier(x)
 32 |     | -
 33 |     | -            y = y.view(x.shape[0], -1)
 34 |     | -            assert y.shape == out.shape
 35 |     | -
 36 |     | -            if w is None:
 37 |     | -                l1_loss += (out - y).abs().sum(0).to(dtype=torch.float64)
 38 |     | -                l2_loss += ((out - y) ** 2).sum(0).to(dtype=torch.float64)
 39 |     | -                l2_losses.append(((out - y) ** 2).to(dtype=torch.float64))
 40 |     | -            else:
 41 |     | -                l1_loss += (
 42 |     | -                    (w.view(-1, 1) * (out - y)).abs().sum(0).to(dtype=torch.float64)
 43 |     | -                )
 44 |     | -                l2_loss += (
 45 |     | -                    (w.view(-1, 1) * ((out - y) ** 2)).sum(0).to(dtype=torch.float64)
 46 |     | -                )
 47 |     | -                l2_losses.append(
 48 |     | -                    (w.view(-1, 1) * ((out - y) ** 2)).to(dtype=torch.float64)
 49 |     | -                )
 50 |     | -
 51 |     | -            n += x.shape[0]
 52 |     | -
 53 |     | -    l2_losses = torch.cat(l2_losses, dim=0)
 54 |     | -    assert n > 0
 55 |     | -
 56 |     | -    # just to double check
 57 |     | -    assert ((l2_losses.mean(0) - l2_loss / n).abs() <= 0.1).all()
 58 |     | -
 59 |     | -    classifier.train()
 60 |     | -    return {"l1": cast(Tensor, l1_loss / n), "l2": cast(Tensor, l2_loss / n)}
 61 |     | -
 62 |     | -
 63 |  17 | class TestLinearModel(BaseTest):
 64 |  18 |     MAX_POINTS: int = 3
 65 |  19 |

@@ -100,7 +54,7 @@ def train_and_compare(

100 |  54 |
101 |  55 |         self.assertTrue(model.bias() is not None if bias else model.bias() is None)
102 |  56 |
103 |     | -        l2_loss = _evaluate(train_loader, model)["l2"]
    |  57 | +        l2_loss = evaluate(train_loader, model)["l2"]
104 |  58 |
105 |  59 |         if objective == "lasso":
106 |  60 |             reg = model.representation().norm(p=1).view_as(l2_loss)
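
The deleted local `_evaluate` helper is replaced by `evaluate`, imported from `tests/utils/evaluate_linear_model.py`, and the call site only swaps the name, so the shared helper is still expected to return mean losses under the "l1" and "l2" keys. The snippet below is a minimal, self-contained sketch of that contract, assuming the shared helper keeps the semantics of the deleted function; the toy dataset, the `torch.nn.Linear` model, and all variable names are illustrative and not part of the test suite.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy regression data with optional per-sample weights (illustrative only).
xs = torch.randn(8, 3)
ys = xs @ torch.tensor([[1.0], [2.0], [3.0]])
ws = torch.rand(8)
loader = DataLoader(TensorDataset(xs, ys, ws), batch_size=4)

model = torch.nn.Linear(3, 1)
model.eval()

l1_loss, l2_loss, n = 0.0, 0.0, 0
with torch.no_grad():
    for x, y, w in loader:
        out = model(x)
        y = y.view(x.shape[0], -1)
        # Weighted residuals, accumulated in float64 as in the deleted helper.
        l1_loss += (w.view(-1, 1) * (out - y)).abs().sum(0).to(dtype=torch.float64)
        l2_loss += (w.view(-1, 1) * ((out - y) ** 2)).sum(0).to(dtype=torch.float64)
        n += x.shape[0]

metrics = {"l1": l1_loss / n, "l2": l2_loss / n}
print(metrics["l2"])  # the test reads the "l2" entry

Accumulating in float64 and dividing by the total sample count at the end mirrors the weighted branch of the removed function, so for the same data this sketch should reproduce its results.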