
Commit 6643dd3

Merge branch 'master' into patch-1
2 parents 983d44b + 4ca5c2c

33 files changed: +1457 −820 lines

.conda/meta.yaml (+10 −1)

```diff
@@ -14,19 +14,28 @@ build:
 requirements:
   host:
     - python>=3.9
+    - setuptools
   run:
     - numpy<2.0
     - pytorch>=1.10
     - matplotlib-base
+    - tqdm
+    - packaging
 
 test:
   imports:
     - captum
 
 about:
   home: https://captum.ai
-  license: BSD
+  license: BSD-3
   license_file: LICENSE
   summary: Model interpretability for PyTorch
+  description: |
+    Captum is a model interpretability and understanding library for PyTorch.
+    Captum means comprehension in Latin and contains general purpose implementations
+    of integrated gradients, saliency maps, smoothgrad, vargrad and others for
+    PyTorch models. It has quick integration for models built with domain-specific
+    libraries such as torchvision, torchtext, and others.
   doc_url: https://captum.ai
   dev_url: https://github.com/pytorch/captum
```
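The `description` block added to the recipe summarizes Captum's attribution algorithms. As a quick illustration of the integrated-gradients API it mentions, here is a minimal sketch; the two-layer model and input shapes are made-up placeholders, not part of this commit:

```python
import torch
import torch.nn as nn
from captum.attr import IntegratedGradients

# Any differentiable PyTorch module works; this tiny MLP is only a placeholder.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
model.eval()

inputs = torch.rand(3, 4, requires_grad=True)  # batch of 3 examples, 4 features
baselines = torch.zeros_like(inputs)           # all-zero reference point

ig = IntegratedGradients(model)
# Attribute the score of class index 1 back to each input feature.
attributions, delta = ig.attribute(
    inputs, baselines=baselines, target=1, return_convergence_delta=True
)
print(attributions.shape)  # torch.Size([3, 4])
```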

README.md (+5 −2)

```diff
@@ -25,8 +25,6 @@ of integrated gradients, saliency maps, smoothgrad, vargrad and others for
 PyTorch models. It has quick integration for models built with domain-specific
 libraries such as torchvision, torchtext, and others.
 
-*Captum is currently in beta and under active development!*
-
 
 #### About Captum
 
@@ -92,6 +90,7 @@ pip install -e .
 To customize the installation, you can also run the following variants of the
 above:
 * `pip install -e .[insights]`: Also installs all packages necessary for running Captum Insights.
+**NOTE**: Captum Insights is being deprecated. See further details [below](#captum-insights).
 * `pip install -e .[dev]`: Also installs all tools necessary for development
 (testing, linting, docs building; see [Contributing](#contributing) below).
 * `pip install -e .[tutorials]`: Also installs all packages necessary for running the tutorial notebooks.
@@ -388,6 +387,10 @@ Captum on different types of models can be found in our tutorials.
 
 ## Captum Insights
 
+**NOTE**: *Support for Captum Insights is being deprecated in an upcoming release.
+While the code will still be available, there will no longer be active
+development or support for it.*
+
 Captum provides a web interface called Insights for easy visualization and
 access to a number of our interpretability algorithms.
 
```

captum/__init__.py (+1 −1)

```diff
@@ -9,6 +9,6 @@
 import captum.robust as robust
 
 
-__version__ = "0.7.0"
+__version__ = "0.8.0"
 
 
 __all__ = ["attr", "concept", "influence", "log", "metrics", "robust"]
```
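With `packaging` now an explicit run dependency in the recipe above, a hedged sketch of how a downstream script might gate behaviour on the bumped version; the `0.8.0` threshold simply mirrors this commit:

```python
from packaging import version

import captum

# Compare the installed release against the version bumped in this commit.
if version.parse(captum.__version__) >= version.parse("0.8.0"):
    print("captum 0.8.0 or newer:", captum.__version__)
else:
    print("older captum release:", captum.__version__)
```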

captum/_utils/common.py (+4 −4)

```diff
@@ -232,12 +232,12 @@ def _format_tensor_into_tuples(inputs: None) -> None: ...
 
 @overload
 def _format_tensor_into_tuples(
-    inputs: Union[Tensor, Tuple[Tensor, ...]]
+    inputs: Union[Tensor, Tuple[Tensor, ...]],
 ) -> Tuple[Tensor, ...]: ...
 
 
 def _format_tensor_into_tuples(
-    inputs: Union[None, Tensor, Tuple[Tensor, ...]]
+    inputs: Union[None, Tensor, Tuple[Tensor, ...]],
 ) -> Union[None, Tuple[Tensor, ...]]:
     if inputs is None:
         return None
@@ -261,7 +261,7 @@ def _format_inputs(inputs: Any, unpack_inputs: bool = True) -> Any:
 
 
 def _format_float_or_tensor_into_tuples(
-    inputs: Union[float, Tensor, Tuple[Union[float, Tensor], ...]]
+    inputs: Union[float, Tensor, Tuple[Union[float, Tensor], ...]],
 ) -> Tuple[Union[float, Tensor], ...]:
     if not isinstance(inputs, tuple):
         assert isinstance(
@@ -276,7 +276,7 @@ def _format_float_or_tensor_into_tuples(
 @overload
 def _format_additional_forward_args(
     # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
-    additional_forward_args: Union[Tensor, Tuple]
+    additional_forward_args: Union[Tensor, Tuple],
     # pyre-fixme[24]: Generic type `tuple` expects at least 1 type parameter.
 ) -> Tuple: ...
 
```
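The trailing commas added above are purely stylistic and do not change behaviour. For context, a small sketch of what `_format_tensor_into_tuples` does, inferred from the signatures shown in this diff: it normalizes a bare `Tensor` (or `None`) into the tuple form the attribution code expects.

```python
import torch
from captum._utils.common import _format_tensor_into_tuples

t = torch.rand(2, 3)

# A single Tensor is wrapped into a 1-tuple, a tuple passes through, None stays None.
print(_format_tensor_into_tuples(t))       # (tensor([...]),)
print(_format_tensor_into_tuples((t, t)))  # (tensor([...]), tensor([...]))
print(_format_tensor_into_tuples(None))    # None
```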

captum/_utils/typing.py (+1)

```diff
@@ -24,6 +24,7 @@
 TupleOrTensorOrBoolGeneric = TypeVar(
     "TupleOrTensorOrBoolGeneric", Tuple[Tensor, ...], Tensor, bool
 )
+PassThroughOutputType = TypeVar("PassThroughOutputType")
 ModuleOrModuleList = TypeVar("ModuleOrModuleList", Module, List[Module])
 TargetType = Union[None, int, Tuple[int, ...], Tensor, List[Tuple[int, ...]], List[int]]
 BaselineTupleType = Union[None, Tuple[Union[Tensor, int, float], ...]]
```
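`PassThroughOutputType` is an unconstrained `TypeVar`, typically used so a wrapper is annotated as returning exactly the type it was given. A minimal, generic sketch of the pattern; `log_and_return` below is a made-up illustration, not Captum code:

```python
from typing import TypeVar

PassThroughOutputType = TypeVar("PassThroughOutputType")


def log_and_return(value: PassThroughOutputType) -> PassThroughOutputType:
    # Type checkers infer that the return type mirrors the argument type.
    print(f"passing through a {type(value).__name__}")
    return value


n: int = log_and_return(3)        # inferred as int
s: str = log_and_return("hello")  # inferred as str
```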

captum/attr/_core/dataloader_attr.py (+2 −4)

```diff
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 
 # pyre-strict
+
 from collections import defaultdict
 from copy import copy
 from typing import Callable, cast, Dict, Iterable, List, Optional, Tuple, Union
@@ -30,7 +31,6 @@ class InputRole:
 
 
 # default reducer wehn reduce is None. Simply concat the outputs by the batch dimension
-# pyre-fixme[2]: Parameter must be annotated.
 def _concat_tensors(accum: Optional[Tensor], cur_output: Tensor, _) -> Tensor:
     return cur_output if accum is None else torch.cat([accum, cur_output])
 
@@ -87,9 +87,7 @@ def _perturb_inputs(
         else:
             baseline = baselines[attr_inp_count]
 
-        # pyre-fixme[58]: `*` is not supported for operand types `object` and
-        # `Tensor`.
-        perturbed_inp = inp * pert_mask + baseline * (1 - pert_mask)
+        perturbed_inp = cast(Tensor, inp) * pert_mask + baseline * (1 - pert_mask)
         perturbed_inputs.append(perturbed_inp)
 
         attr_inp_count += 1
```
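The rewritten line keeps the same arithmetic and only adds a `cast` to satisfy the type checker: entries where the perturbation mask is 1 keep the original input, entries where it is 0 are replaced by the baseline. A toy illustration with made-up values:

```python
import torch

inp = torch.tensor([1.0, 2.0, 3.0, 4.0])
baseline = torch.zeros_like(inp)                # reference values
pert_mask = torch.tensor([1.0, 0.0, 1.0, 0.0])  # 1 = keep input, 0 = use baseline

perturbed_inp = inp * pert_mask + baseline * (1 - pert_mask)
print(perturbed_inp)  # tensor([1., 0., 3., 0.])
```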
