Skip to content

Commit bdc841f

Browse files
author
Closed-Limelike-Curves
committed
Fix typo
1 parent 73fe8a0 commit bdc841f

File tree

3 files changed

+15
-13
lines changed

3 files changed

+15
-13
lines changed

Project.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
name = "ParetoSmooth"
22
uuid = "a68b5a21-f429-434e-8bfa-46b447300aac"
33
authors = ["Carlos Parada <[email protected]>"]
4-
version = "0.6.4"
4+
version = "0.6.5"
55

66
[deps]
77
AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5"

src/InternalHelpers.jl

+2-2
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ function _throw_pareto_k_warning(ξ)
3737
@warn "Some Pareto k values are extremely high (>1). PSIS will not produce " *
3838
"consistent estimates."
3939
elseif any(ξ .≥ 0.7)
40-
@warn "Some Pareto k values are high (>.7), indicating that PSIS has failed to " *
40+
@warn "Some Pareto k values are high (>.7), indicating PSIS has failed to " *
4141
"approximate the true distribution."
4242
elseif any(ξ .≥ 0.5)
4343
@info "Some Pareto k values are slightly high (>.5); some pointwise estimates " *
@@ -69,7 +69,7 @@ function _convert_to_array(matrix::AbstractMatrix, chain_index::AbstractVector)
6969
throw(
7070
ArgumentError(
7171
"Indices must be numbered from 1 through the total number of chains."
72-
),
72+
)
7373
)
7474
else
7575
# Check how many elements are in each chain, assign to "counts"

src/ModelComparison.jl

+12-10
Original file line numberDiff line numberDiff line change
@@ -31,20 +31,22 @@ A struct containing the results of model comparison.
3131
substantial overlap, which creates a downward biased estimator. LOO-CV differences are
3232
*not* asymptotically normal, so these standard errors cannot be used to
3333
calculate a confidence interval.
34-
- `gmp::NamedTuple`: The geometric mean of the posterior probability assigned to each data
35-
point by each model. This is equal to `exp(cv_avg/n)` for each model. By taking the
36-
exponent of the average score, we can take outcomes on the log scale and shift them back
37-
onto the probability scale, making the results more easily interpretable. This measure
38-
is only meaningful for classifiers, i.e. variables with discrete outcomes; it is not
39-
possible to interpret GMP values for continuous outcome variables.
34+
- `gmpd::NamedTuple`: The geometric mean of the predictive distribution. It equals the
35+
geometric mean of the probability assigned to each data point by the model, that is,
36+
`exp(cv_avg)`. This measure is only meaningful for classifiers (variables with discrete
37+
outcomes). We can think of it as measuring how often the model was right: A model that
38+
always predicts incorrectly will have a GMPD of 0, while a model that always predicts
39+
correctly will have a GMPD of 1. However, the GMPD gives a model "partial points"
40+
between 0 and 1 whenever the model assigns a probability other than 0 or 1 to the
41+
outcome that actually happened.
4042
4143
See also: [`PsisLoo`](@ref)
4244
"""
4345
struct ModelComparison{RealType<:Real, N}
4446
pointwise::KeyedArray{RealType, 3, <:NamedDimsArray, <:Any}
4547
estimates::KeyedArray{RealType, 2, <:NamedDimsArray, <:Any}
4648
std_err::NamedTuple{<:Any, Tuple{Vararg{RealType, N}}}
47-
gmp::NamedTuple{<:Any, Tuple{Vararg{RealType, N}}}
49+
gmpd::NamedTuple{<:Any, Tuple{Vararg{RealType, N}}}
4850
end
4951

5052

@@ -114,8 +116,8 @@ function loo_compare(
114116
log_norm = logsumexp(cv_elpd)
115117
weights = @turbo warn_check_args=false @. exp(cv_elpd - log_norm)
116118

117-
gmp = @turbo @. exp(cv_elpd / data_size)
118-
gmp = NamedTuple{name_tuple}(gmp)
119+
gmpd = @turbo @. exp(cv_elpd / data_size)
120+
gmpd = NamedTuple{name_tuple}(gmpd)
119121

120122
@turbo warn_check_args=false @. cv_elpd = cv_elpd - cv_elpd[1]
121123
@turbo warn_check_args=false avg_elpd = cv_elpd ./ data_size
@@ -125,7 +127,7 @@ function loo_compare(
125127
statistic=[:cv_elpd, :cv_avg, :weight],
126128
)
127129

128-
return ModelComparison(pointwise_diffs, total_diffs, se_total, gmp)
130+
return ModelComparison(pointwise_diffs, total_diffs, se_total, gmpd)
129131

130132
end
131133

0 commit comments

Comments
 (0)