14 changes: 7 additions & 7 deletions .pre-commit-config.yaml
@@ -4,7 +4,7 @@ ci:

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
rev: v6.0.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
@@ -35,25 +35,25 @@ repos:
- id: rst-inline-touching-normal

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: "v0.12.0"
rev: "v0.14.3"
hooks:
- id: ruff-check
args: ["--fix", "--show-fixes"]

- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.10.0
rev: 25.9.0
hooks:
- id: black-jupyter
types_or: [python, pyi, jupyter]

- repo: https://github.com/adamchainz/blacken-docs
rev: 1.19.1
rev: 1.20.0
hooks:
- id: blacken-docs
additional_dependencies: [black==24.10.0]

- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.13.0
rev: v1.18.2
# check the oldest and newest supported Pythons
# except skip python 3.9 for numpy, due to poor typing
hooks:
@@ -69,14 +69,14 @@ repos:
args: ["--python-version=3.13"]

- repo: https://github.com/codespell-project/codespell
rev: v2.3.0
rev: v2.4.1
hooks:
- id: codespell
files: ^.*\.(py|md|rst)$
args: ["-w", "-L", "hist,gaus"]

- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.30.0
rev: 0.34.1
hooks:
- id: check-readthedocs
args: ["--verbose"]
2 changes: 1 addition & 1 deletion docs/development.rst
@@ -255,7 +255,7 @@ final deployment to PyPI_ can be done by creating a GitHub Release:
#. Select the release tag that was just pushed, and set the release title to be the tag
(e.g. ``v1.2.3``).
#. Use the "Auto-generate release notes" button to generate a skeleton of the release
notes and then augment them with the preprepared release notes the release maintainer
notes and then augment them with the prepared release notes the release maintainer
has written.
#. Select "This is a pre-release" if the release is a release candidate.
#. Select "Create a discussion for this release" if the release is a stable release.
18 changes: 2 additions & 16 deletions docs/examples/notebooks/ImpactPlot.ipynb
@@ -143,24 +143,10 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def calc_impact(idx, b, e, i, width, poi_index):\n",
" _, _, bb, ee = fitresults([(idx, b + e)])\n",
" poi_up_post = bb[poi_index]\n",
"\n",
" _, _, bb, ee = fitresults([(idx, b - e)])\n",
" poi_dn_post = bb[poi_index]\n",
"\n",
" _, _, bb, ee = fitresults([(idx, b + width)])\n",
" poi_up_pre = bb[poi_index]\n",
"\n",
" _, _, bb, ee = fitresults([(idx, b - width)])\n",
" poi_dn_pre = bb[poi_index]\n",
" return np.asarray([poi_dn_post, poi_up_post, poi_dn_pre, poi_up_pre])"
]
"source": "def calc_impact(idx, b, e, i, width, poi_index):\n _, _, bb, _ = fitresults([(idx, b + e)])\n poi_up_post = bb[poi_index]\n\n _, _, bb, _ = fitresults([(idx, b - e)])\n poi_dn_post = bb[poi_index]\n\n _, _, bb, _ = fitresults([(idx, b + width)])\n poi_up_pre = bb[poi_index]\n\n _, _, bb, _ = fitresults([(idx, b - width)])\n poi_dn_pre = bb[poi_index]\n return np.asarray([poi_dn_post, poi_up_post, poi_dn_pre, poi_up_pre])"
},
{
"cell_type": "code",
76 changes: 2 additions & 74 deletions docs/examples/notebooks/binderexample/StatisticalAnalysis.ipynb
@@ -171,82 +171,10 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_mc_counts(pars):\n",
" deltas, factors = pdf.modifications(pars)\n",
" allsum = pyhf.tensorlib.concatenate(\n",
" deltas + [pyhf.tensorlib.astensor(pdf.nominal_rates)]\n",
" )\n",
" nom_plus_delta = pyhf.tensorlib.sum(allsum, axis=0)\n",
" nom_plus_delta = pyhf.tensorlib.reshape(\n",
" nom_plus_delta, (1,) + pyhf.tensorlib.shape(nom_plus_delta)\n",
" )\n",
" allfac = pyhf.tensorlib.concatenate(factors + [nom_plus_delta])\n",
" return pyhf.tensorlib.product(allfac, axis=0)\n",
"\n",
"\n",
"animate_plot_pieces = None\n",
"\n",
"\n",
"def init_plot(fig, ax, par_settings):\n",
" global animate_plot_pieces\n",
"\n",
" nbins = sum(list(pdf.config.channel_nbins.values()))\n",
" x = np.arange(nbins)\n",
" data = np.zeros(nbins)\n",
" items = []\n",
" for i in [3, 2, 1, 0]:\n",
" items.append(ax.bar(x, data, 1, alpha=1.0))\n",
" animate_plot_pieces = (\n",
" items,\n",
" ax.scatter(\n",
" x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n",
" ),\n",
" )\n",
"\n",
"\n",
"def animate(ax=None, fig=None, **par_settings):\n",
" global animate_plot_pieces\n",
" items, obs = animate_plot_pieces\n",
" pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n",
" for k, v in par_settings.items():\n",
" pars[par_name_dict[k]] = v\n",
"\n",
" mc_counts = get_mc_counts(pars)\n",
" rectangle_collection = zip(*map(lambda x: x.patches, items))\n",
"\n",
" for rectangles, binvalues in zip(rectangle_collection, mc_counts[:, 0].T):\n",
" offset = 0\n",
" for sample_index in [3, 2, 1, 0]:\n",
" rect = rectangles[sample_index]\n",
" binvalue = binvalues[sample_index]\n",
" rect.set_y(offset)\n",
" rect.set_height(binvalue)\n",
" offset += rect.get_height()\n",
"\n",
" fig.canvas.draw()\n",
"\n",
"\n",
"def plot(ax=None, order=[3, 2, 1, 0], **par_settings):\n",
" pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n",
" for k, v in par_settings.items():\n",
" pars[par_name_dict[k]] = v\n",
"\n",
" mc_counts = get_mc_counts(pars)\n",
" bottom = None\n",
" # nb: bar_data[0] because evaluating only one parset\n",
" for i, sample_index in enumerate(order):\n",
" data = mc_counts[sample_index][0]\n",
" x = np.arange(len(data))\n",
" ax.bar(x, data, 1, bottom=bottom, alpha=1.0)\n",
" bottom = data if i == 0 else bottom + data\n",
" ax.scatter(\n",
" x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n",
" )"
]
"source": "def get_mc_counts(pars):\n deltas, factors = pdf.modifications(pars)\n allsum = pyhf.tensorlib.concatenate(\n deltas + [pyhf.tensorlib.astensor(pdf.nominal_rates)]\n )\n nom_plus_delta = pyhf.tensorlib.sum(allsum, axis=0)\n nom_plus_delta = pyhf.tensorlib.reshape(\n nom_plus_delta, (1,) + pyhf.tensorlib.shape(nom_plus_delta)\n )\n allfac = pyhf.tensorlib.concatenate(factors + [nom_plus_delta])\n return pyhf.tensorlib.product(allfac, axis=0)\n\n\nanimate_plot_pieces = None\n\n\ndef init_plot(fig, ax, par_settings):\n global animate_plot_pieces\n\n nbins = sum(list(pdf.config.channel_nbins.values()))\n x = np.arange(nbins)\n data = np.zeros(nbins)\n items = []\n for i in [3, 2, 1, 0]:\n items.append(ax.bar(x, data, 1, alpha=1.0))\n animate_plot_pieces = (\n items,\n ax.scatter(\n x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n ),\n )\n\n\ndef animate(ax=None, fig=None, **par_settings):\n global animate_plot_pieces\n items, _ = animate_plot_pieces\n pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n for k, v in par_settings.items():\n pars[par_name_dict[k]] = v\n\n mc_counts = get_mc_counts(pars)\n rectangle_collection = zip(*map(lambda x: x.patches, items))\n\n for rectangles, binvalues in zip(rectangle_collection, mc_counts[:, 0].T):\n offset = 0\n for sample_index in [3, 2, 1, 0]:\n rect = rectangles[sample_index]\n binvalue = binvalues[sample_index]\n rect.set_y(offset)\n rect.set_height(binvalue)\n offset += rect.get_height()\n\n fig.canvas.draw()\n\n\ndef plot(ax=None, order=[3, 2, 1, 0], **par_settings):\n pars = pyhf.tensorlib.astensor(pdf.config.suggested_init())\n for k, v in par_settings.items():\n pars[par_name_dict[k]] = v\n\n mc_counts = get_mc_counts(pars)\n bottom = None\n # nb: bar_data[0] because evaluating only one parset\n for i, sample_index in enumerate(order):\n data = mc_counts[sample_index][0]\n x = np.arange(len(data))\n ax.bar(x, data, 1, bottom=bottom, alpha=1.0)\n bottom = data if i == 0 else bottom + data\n ax.scatter(\n x, workspace.data(pdf, include_auxdata=False), c=\"k\", alpha=1.0, zorder=99\n )"
},
{
"cell_type": "markdown",
2 changes: 1 addition & 1 deletion docs/likelihood.rst
@@ -115,7 +115,7 @@ constraint terms are derived implicitly as each type of modifier
unambiguously defines the constraint terms it requires. Correlated shape
modifiers and normalisation uncertainties have compatible constraint
terms and thus modifiers can be declared that *share* parameters by
re-using a name [1]_ for multiple modifiers. That is, a variation of a
reusing a name [1]_ for multiple modifiers. That is, a variation of a
single parameter causes a shift within sample rates due to both shape
and normalisation variations.

2 changes: 1 addition & 1 deletion src/pyhf/infer/__init__.py
@@ -177,7 +177,7 @@ def hypotest(
teststat, sig_plus_bkg_distribution, bkg_only_distribution
)
)
CLsb_exp, CLb_exp, CLs_exp = calc.expected_pvalues(
CLsb_exp, _CLb_exp, CLs_exp = calc.expected_pvalues(
sig_plus_bkg_distribution, bkg_only_distribution
)
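Most of the remaining source changes in this pull request follow the same pattern as the hunk above: values that are unpacked but never read are renamed to _ or given a leading underscore (here CLb_exp becomes _CLb_exp), the usual Python convention for intentionally unused bindings that linters recognise. A minimal, self-contained sketch of the convention; the fit() helper below is purely hypothetical and not part of pyhf:

def fit():
    # Hypothetical stand-in for a fit routine returning
    # (best-fit POI, uncertainty, twice the negative log-likelihood).
    return 1.0, 0.1, 42.0


# Only the POI is needed below; the other return values are bound to
# underscore names so that linters do not flag them as unused locals.
poi, _, _twice_nll = fit()
print(poi)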

1 change: 0 additions & 1 deletion src/pyhf/infer/calculators.py
@@ -524,7 +524,6 @@ def expected_pvalues(self, sig_plus_bkg_distribution, bkg_only_distribution):
:math:`\mathrm{CL}_{b}`, and :math:`\mathrm{CL}_{s}`.
"""
# Calling pvalues is easier then repeating the CLs calculation here
tb, _ = get_backend()
return list(
map(
list,
6 changes: 3 additions & 3 deletions src/pyhf/infer/test_statistics.py
@@ -23,7 +23,7 @@ def _qmu_like(
If the lower bound of the POI is 0 this automatically implements
qmu_tilde. Otherwise this is qmu (no tilde).
"""
tensorlib, optimizer = get_backend()
tensorlib, _ = get_backend()
tmu_like_stat, (mubhathat, muhatbhat) = _tmu_like(
mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=True
)
@@ -44,7 +44,7 @@ def _tmu_like(
If the lower bound of the POI is 0 this automatically implements
tmu_tilde. Otherwise this is tmu (no tilde).
"""
tensorlib, optimizer = get_backend()
tensorlib, _ = get_backend()
mubhathat, fixed_poi_fit_lhood_val = fixed_poi_fit(
mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_val=True
)
@@ -515,7 +515,7 @@ def q0(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=Fa
)
mu = 0.0

tensorlib, optimizer = get_backend()
tensorlib, _ = get_backend()

tmu_like_stat, (mubhathat, muhatbhat) = _tmu_like(
mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=True
1 change: 0 additions & 1 deletion src/pyhf/optimize/opt_jax.py
@@ -45,7 +45,6 @@ def wrap_objective(objective, data, pdf, stitch_pars, do_grad=False, jit_pieces=
Returns:
objective_and_grad (:obj:`func`): tensor backend wrapped objective,gradient pair
"""
tensorlib, _ = get_backend()
# NB: tuple arguments that need to be hashable (static_argnums)
if do_grad:

8 changes: 6 additions & 2 deletions src/pyhf/readxml.py
@@ -283,9 +283,13 @@ def process_data(


def process_channel(
channelxml: ET.ElementTree, resolver: ResolverType, track_progress: bool = False
channelxml: ET.ElementTree[ET.Element[str]],
resolver: ResolverType,
track_progress: bool = False,
) -> tuple[str, list[float], list[Sample], list[Parameter]]:
channel = channelxml.getroot()
if channel is None:
raise RuntimeError("Root element of ElementTree is missing.")

inputfile = channel.attrib.get('InputFile', '')
histopath = channel.attrib.get('HistoPath', '')
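The guard added above handles the case in which xml.etree.ElementTree.ElementTree.getroot() returns None, which is what happens for a tree constructed without a root element. A short, illustrative sketch of that behaviour (it does not reproduce pyhf's actual call path):

import xml.etree.ElementTree as ET

# An ElementTree built without a root element has nothing to return,
# so getroot() yields None rather than an Element.
empty_tree = ET.ElementTree()
assert empty_tree.getroot() is None

# Reading channel.attrib from a None root would fail with an opaque
# AttributeError; the new check raises a descriptive RuntimeError instead.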
@@ -316,7 +320,7 @@ def process_channel(


def process_measurements(
toplvl: ET.ElementTree,
toplvl: ET.ElementTree[ET.Element[str]],
other_parameter_configs: Sequence[Parameter] | None = None,
) -> list[Measurement]:
"""
4 changes: 2 additions & 2 deletions src/pyhf/tensor/manager.py
@@ -142,11 +142,11 @@ def set_backend(

try:
new_optimizer = getattr(
OptimizerRetriever, f"{custom_optimizer.lower()}_optimizer"
OptimizerRetriever, f"{custom_optimizer.lower()!s}_optimizer"
)()
except TypeError:
raise exceptions.InvalidOptimizer(
f"The optimizer provided is not supported: {custom_optimizer}. Select from one of the supported optimizers: scipy, minuit"
f"The optimizer provided is not supported: {custom_optimizer!s}. Select from one of the supported optimizers: scipy, minuit"
)
else:
new_optimizer = custom_optimizer
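The only edit in this hunk is the explicit !s conversion inside the two f-strings. As a small, purely illustrative sketch, !s applies str() to the value before it is interpolated, whereas an f-string otherwise uses format(), which a class may customise through __format__:

class FakeOptimizer:
    # Illustrative class whose str() form differs from its repr().
    def __repr__(self):
        return "<FakeOptimizer object>"

    def __str__(self):
        return "scipy"


opt = FakeOptimizer()
# The !s conversion forces str() before the value is formatted.
print(f"{opt!s}_optimizer")  # prints: scipy_optimizer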
15 changes: 13 additions & 2 deletions tests/test_import.py
@@ -108,6 +108,17 @@ def test_process_normfactor_configs():
assert result['ParallelMeasurement']['mu_both']['bounds'] == [[1.0, 5.0]]


def test_process_channel_missing_root(mocker, tmp_path):
# mock missing ElementTree root
mock_channelxml = mocker.Mock(spec=ET.ElementTree)
mock_channelxml.getroot.return_value = None

resolver = pyhf.readxml.resolver_factory(tmp_path, [])

with pytest.raises(RuntimeError, match="Root element of ElementTree is missing"):
pyhf.readxml.process_channel(mock_channelxml, resolver)


def test_import_histogram():
data, uncert = pyhf.readxml.import_root_histogram(
lambda x: Path("validation/xmlimport_input/data").joinpath(x),
@@ -486,7 +497,7 @@ def test_import_noChannelData(mocker, datadir):

basedir = datadir.joinpath("xmlimport_noChannelData")
with pytest.raises(
RuntimeError, match="Channel channel1 is missing data. See issue #1911"
RuntimeError, match=r"Channel channel1 is missing data. See issue #1911"
):
pyhf.readxml.parse(basedir.joinpath("config/example.xml"), basedir)

@@ -499,7 +510,7 @@ def test_import_noChannelDataPaths(mocker, datadir):
basedir = datadir.joinpath("xmlimport_noChannelDataPaths")
with pytest.raises(
NotImplementedError,
match="Conversion of workspaces without data is currently not supported.\nSee https://github.com/scikit-hep/pyhf/issues/566",
match=r"Conversion of workspaces without data is currently not supported.\nSee https://github.com/scikit-hep/pyhf/issues/566",
):
pyhf.readxml.parse(basedir.joinpath("config/example.xml"), basedir)
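The match= changes in this file, and the similar ones in tests/test_optim.py and tests/test_pdf.py below, only add the raw-string prefix: pytest.raises interprets match as a regular expression and applies it with re.search, so a raw string keeps backslash escapes and regex metacharacters explicit. A short illustration, with the message text copied from the test above:

import re

message = (
    "Conversion of workspaces without data is currently not supported.\n"
    "See https://github.com/scikit-hep/pyhf/issues/566"
)
# In a raw string the backslash reaches the regex engine unchanged, so r"\n"
# is the regex escape for a newline, and the unescaped "." matches any character.
assert re.search(r"not supported.\nSee https", message) is not None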

6 changes: 3 additions & 3 deletions tests/test_infer.py
@@ -313,7 +313,7 @@ def test_hypotest_return_calculator(
Check that the return structure of pyhf.infer.hypotest with the
addition of the return_calculator keyword arg is as expected
"""
*_, model = hypotest_args
*_, _model = hypotest_args

# only those return flags where the toggled return value
# is placed in front of the calculator in the returned tuple
@@ -393,7 +393,7 @@ def _make_main_pdf(self, pars):
return pyhf.probability.Poisson(expected_main)

def _make_constraint_pdf(self, pars):
mu, gamma = pars
_, gamma = pars
return pyhf.probability.Poisson(gamma * self.factor)

def expected_data(self, pars, include_auxdata=True):
@@ -499,7 +499,7 @@ def test_emperical_distribution(tmp_path, hypotest_args):
tb = pyhf.tensorlib
np.random.seed(0)

mu_test, data, model = hypotest_args
mu_test, _, model = hypotest_args
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
fixed_params = model.config.suggested_fixed()
4 changes: 2 additions & 2 deletions tests/test_interpolate.py
@@ -52,7 +52,7 @@ def filled_shapes(histogramssets, alphasets):


def test_interpolator_structure(interpcode, random_histosets_alphasets_pair):
histogramssets, alphasets = random_histosets_alphasets_pair
histogramssets, _alphasets = random_histosets_alphasets_pair

interpolator = pyhf.interpolators.get(interpcode)(
histogramssets.tolist(), subscribe=False
@@ -66,7 +66,7 @@ def test_interpolator_subscription(mocker, interpcode, random_histosets_alphasets_pair):


def test_interpolator_subscription(mocker, interpcode, random_histosets_alphasets_pair):
histogramssets, alphasets = random_histosets_alphasets_pair
histogramssets, _alphasets = random_histosets_alphasets_pair
ename = 'tensorlib_changed'

interpolator_cls = pyhf.interpolators.get(interpcode)
2 changes: 1 addition & 1 deletion tests/test_optim.py
@@ -462,7 +462,7 @@ def test_minuit_set_options(mocker):

def test_get_tensor_shim(monkeypatch):
monkeypatch.setattr(pyhf.tensorlib, 'name', 'fake_backend')
with pytest.raises(ValueError, match="No optimizer shim for fake_backend."):
with pytest.raises(ValueError, match=r"No optimizer shim for fake_backend."):
_get_tensor_shim()


2 changes: 1 addition & 1 deletion tests/test_pdf.py
@@ -1356,6 +1356,6 @@ def test_multi_component_poi():

with pytest.raises(
pyhf.exceptions.InvalidModel,
match="The parameter 'mu' contains multiple components and is not currently supported as parameter of interest.",
match=r"The parameter 'mu' contains multiple components and is not currently supported as parameter of interest.",
):
pyhf.Workspace(spec).model()
1 change: 0 additions & 1 deletion tests/test_public_api.py
@@ -184,7 +184,6 @@ def test_prob_models(backend):


def test_pdf_batched(backend):
tb, _ = backend
source = {
"binning": [2, -0.5, 1.5],
"bindata": {"data": [55.0], "bkg": [50.0], "bkgerr": [7.0], "sig": [10.0]},