
Commit 3896595

Merge pull request #58 from msainsburydale/auto-juliaformatter-pr
Automatic JuliaFormatter.jl run
2 parents 00f868d + adfd1b7 commit 3896595
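The commit is an automated formatting pass, per the commit message. For readers unfamiliar with the tool, a minimal sketch of how such a pass is typically invoked, assuming JuliaFormatter.jl's default style (the repository's actual formatter configuration is not shown in this commit):

using JuliaFormatter   # the formatter named in the commit message

format(".")   # reformat every .jl file under the current directory, in place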

5 files changed: +8 −19 lines


src/Estimators.jl

Lines changed: 2 additions & 7 deletions
@@ -492,20 +492,17 @@ function PosteriorEstimator(network, d::Integer, dstar::Integer = d; q = Normali
     end
 
     # Distribution used to approximate the posterior
-    q = q(d, dstar; kwargs...)
+    q = q(d, dstar; kwargs...)
 
     # Initialise the estimator
     return PosteriorEstimator(q, network)
 end
 
 # Constructor for consistent argument ordering
-function PosteriorEstimator(network, q::A) where A <: ApproximateDistribution
+function PosteriorEstimator(network, q::A) where {A <: ApproximateDistribution}
     return PosteriorEstimator(q, network)
 end
 
-
-
-
 #TODO maybe its better to not have a tuple, and just allow the arguments to be passed as normal... Just have to change DeepSet definition to allow two arguments in some places (this is more natural). Can easily allow backwards compat in this case too.
 @doc raw"""
     RatioEstimator <: NeuralEstimator
@@ -615,8 +612,6 @@ end
 # Z = simulate(θ, m)
 # r̂(Z, θ) # log of the likelihood-to-evidence ratios
 
-
-
 @doc raw"""
     PiecewiseEstimator <: NeuralEstimator
     PiecewiseEstimator(estimators::Vector{BayesEstimator}, changepoints::Vector{Integer})
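The only non-whitespace change in this file is the braced `where` clause at line 502; the two spellings define identical methods, and JuliaFormatter simply normalises to the braced form. A self-contained sketch illustrating the equivalence (`Dummy` and this one-line `ApproximateDistribution` are stand-ins, not the package's definitions):

abstract type ApproximateDistribution end
struct Dummy <: ApproximateDistribution end

# Pre-format spelling: bare `where` clause
function f(q::A) where A <: ApproximateDistribution
    return typeof(q)
end

# Post-format spelling: braced `where` clause (identical meaning)
function g(q::A) where {A <: ApproximateDistribution}
    return typeof(q)
end

f(Dummy()) == g(Dummy())   # true: both methods dispatch and behave identically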

src/assess.jl

Lines changed: 2 additions & 2 deletions
@@ -269,7 +269,7 @@ function assess(
     # Add estimator name if it was provided
     if !isnothing(estimator_names) # deprecation coercion
         estimator_name = estimator_names
-    end
+    end
     if !isnothing(estimator_name)
         θ̂[!, "estimator"] .= estimator_name
         runtime[!, "estimator"] .= estimator_name
@@ -296,7 +296,7 @@ function assess(
     df_s = DataFrame(
         parameter = repeat(parameter_names, inner = N),
         truth = repeat(θ[:, idx], inner = N),
-        draw = repeat(1:N, outer = d),
+        draw = repeat(1:N, outer = d),
         value = vec(S')
     )
     df_s[!, "k"] .= ((idx - 1) % K) + 1
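Both edits in this file are whitespace-only, but the surrounding `repeat` calls reward a second look: `inner` repeats each element N times while `outer` tiles the whole range, which is what aligns the long-format columns element-wise. A small illustration with made-up values, not the package's data:

using DataFrames

parameter_names = ["μ", "σ"]   # d = 2 parameters, illustrative only
N = 3                          # N draws per parameter

df = DataFrame(
    parameter = repeat(parameter_names, inner = N),       # μ, μ, μ, σ, σ, σ
    draw = repeat(1:N, outer = length(parameter_names))   # 1, 2, 3, 1, 2, 3
)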

src/inference.jl

Lines changed: 2 additions & 4 deletions
@@ -139,11 +139,10 @@ function sampleposterior(
     est::RatioEstimator,
     Z,
     N::Integer = 1000;
-    logprior::Function = θ -> 0f0,
+    logprior::Function = θ -> 0.0f0,
     θ_grid = nothing, theta_grid = nothing,
     # θ₀ = nothing, theta0 = nothing, # NB a basic MCMC sampler could be initialised with θ₀
     kwargs...)
-
     Z = f32(Z)
 
     # Check duplicated arguments that are needed so that the R interface uses ASCII characters only
@@ -168,7 +167,6 @@ function sampleposterior(
     end
 end
 
-
 function sampleposterior(estimator::PosteriorEstimator, Z, N::Integer = 1000; kwargs...)
     # Determine if we are using the gpu
     args = (; kwargs...)
@@ -207,7 +205,7 @@ See also [`posteriormedian()`](@ref), [`posteriormean()`](@ref).
 """
 function posteriormode(
     est::RatioEstimator, Z;
-    logprior::Function = θ -> 0f0, penalty::Union{Function, Nothing} = nothing,
+    logprior::Function = θ -> 0.0f0, penalty::Union{Function, Nothing} = nothing,
     θ_grid = nothing, theta_grid = nothing,
     θ₀ = nothing, theta0 = nothing,
     kwargs...
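The substantive edits here only respell the Float32 literal in the `logprior` default: `0f0` and `0.0f0` denote the same value, and JuliaFormatter prefers the explicit decimal form. A two-line check:

typeof(0f0)     # Float32: `f0` is Julia's Float32 literal suffix
0f0 === 0.0f0   # true: the two spellings are the same value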

src/train.jl

Lines changed: 2 additions & 5 deletions
@@ -107,8 +107,8 @@ function findlr(opt)
 end
 
 function _train(
-    estimator,
-    sampler,
+    estimator,
+    sampler,
     simulator;
     m = nothing,
     ξ = nothing, xi = nothing,
@@ -190,7 +190,6 @@ function _train(
     local min_val_risk = val_risk # minimum validation loss, monitored for early stopping
     local early_stopping_counter = 0
     train_time = @elapsed for epoch = 1:epochs
-
         GC.gc(false)
 
         if store_entire_train_set
@@ -322,7 +321,6 @@ function _train(estimator, θ_train::P, θ_val::P, simulator;
     local min_val_risk = val_risk
     local early_stopping_counter = 0
     train_time = @elapsed for epoch = 1:epochs
-
         GC.gc(false)
 
         sim_time = 0.0
@@ -441,7 +439,6 @@ function _train(estimator, θ_train::P, θ_val::P, Z_train::T, Z_val::T;
     local min_val_risk = val_risk
     local early_stopping_counter = 0
     train_time = @elapsed for epoch = 1:epochs
-
         GC.gc(false)
 
         # For each batch update estimator and compute the training loss
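The deleted lines are blank lines inside three variants of the same training loop, whose early-stopping bookkeeping (`min_val_risk`, `early_stopping_counter`) is visible in the context lines. A hedged, self-contained sketch of that pattern; `train_one_epoch!` and `compute_val_risk` are hypothetical stand-ins, not functions from the package:

function toy_train!(estimator; epochs = 100, patience = 5)
    min_val_risk = Inf              # best validation risk seen so far
    early_stopping_counter = 0      # epochs since the last improvement
    train_time = @elapsed for epoch = 1:epochs
        GC.gc(false)                # as in the source: incremental GC each epoch
        train_one_epoch!(estimator)               # hypothetical
        val_risk = compute_val_risk(estimator)    # hypothetical
        if val_risk < min_val_risk
            min_val_risk = val_risk
            early_stopping_counter = 0
        else
            early_stopping_counter += 1
            early_stopping_counter >= patience && break   # stop early
        end
    end
    return estimator, train_time
end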

src/utility.jl

Lines changed: 0 additions & 1 deletion
@@ -289,7 +289,6 @@ function _checkgpu(use_gpu; verbose::Bool = true)
     return (device)
 end
 
-
 # Here, we define _manualgc() for the case that CUDA has not been loaded (so, we will be using the CPU)
 # For the case that CUDA is loaded, the function is overloaded in ext/NeuralEstimatorsCUDAExt.jl
 # NB Julia complains if we overload functions in package extensions... to get around this, here we
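The comment truncated above describes a standard workaround for Julia package extensions: the parent package owns the function so that the extension can add methods to it. A minimal sketch under that assumption (the fallback body is illustrative, not the package's actual implementation):

# In the parent package (e.g. src/utility.jl): a CPU fallback that the
# CUDA extension can later extend with its own method.
_manualgc() = GC.gc()   # plain garbage collection when no GPU is in use

# In ext/NeuralEstimatorsCUDAExt.jl, loaded only when CUDA.jl is present,
# one would then add a GPU-aware method along these lines:
#     import NeuralEstimators: _manualgc
#     _manualgc() = (GC.gc(); CUDA.reclaim())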
