Skip to content

Commit 9e4345a

Browse files
committed
Fixed typos and added docstrings
1 parent 9c91cbe commit 9e4345a

8 files changed

+81
-14
lines changed

docs/make.jl

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ makedocs(;
1616
),
1717
pages=[
1818
"CausalELM" => "index.md",
19-
"Getting Started" => Any[
19+
"User Guide" => Any[
2020
"Interrupted Time Series Estimation" => "guide/its.md",
2121
"G-computation" => "guide/gcomputation.md",
2222
"Double Machine Learning" => "guide/doublemachinelearning.md",

docs/src/reference/crossval.md

+4-4
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,8 @@ Methods to find the optimal number of neurons via cross validation
55
CausalELM.CrossValidation
66
CausalELM.CrossValidation.recode
77
CausalELM.CrossValidation.generate_folds
8-
CausalELM.CrossValidate.validate_fold
9-
CausalELM.CrossValidate.cross_validate
10-
CausalELM.CrossValidate.best_size
11-
CausalELM.CrossValidate.shuffle_date
8+
CausalELM.CrossValidation.validate_fold
9+
CausalELM.CrossValidation.cross_validate
10+
CausalELM.CrossValidation.best_size
11+
CausalELM.CrossValidation.shuffle_data
1212
```

docs/src/reference/estimation.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ CausalELM.Estimators.CausalEstimator
88
CausalELM.Estimators.InterruptedTimeSeries
99
CausalELM.Estimators.GComputation
1010
CausalELM.Estimators.DoubleMachineLearning
11-
CausalELM.Estimators.estimatecausaleffect!
11+
CausalELM.Estimators.estimate_causal_effect!
1212
CausalELM.Estimators.first_stage!
1313
CausalELM.Estimators.ate!
1414
CausalELM.Estimators.predict_propensity_score

docs/src/reference/inference.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,6 @@ Methods for summarization and inference of estimators in the CausalELM package
44
```@docs
55
CausalELM.Inference
66
CausalELM.Inference.summarize
7-
CausalELM.Inference.quantitiesofinterest
8-
CausalELM.Inference.generatenulldistribution
7+
CausalELM.Inference.quantities_of_interest
8+
CausalELM.Inference.generate_null_distribution
99
```

src/inference.jl

+5-5
Original file line numberDiff line numberDiff line change
@@ -321,7 +321,7 @@ function quantities_of_interest(m::Union{CausalEstimator, Metalearner}, n::Integ
321321
end
322322

323323
"""
324-
quantities_of_interest(model, nsplits)
324+
quantities_of_interest(model, n)
325325
326326
Generate a p-value and standard error through randomization inference
327327
@@ -345,16 +345,16 @@ julia> quantities_of_interest(its, 10)
345345
(0.0, 0.07703275541001667)
346346
```
347347
"""
348-
function quantities_of_interest(model::InterruptedTimeSeries, nsplits::Integer=1000,
348+
function quantities_of_interest(model::InterruptedTimeSeries, n::Integer=1000,
349349
mean_effect::Bool=true)
350-
local null_dist = generate_null_distribution(model, nsplits, mean_effect)
350+
local null_dist = generate_null_distribution(model, n, mean_effect)
351351
local metric = ifelse(mean_effect, mean, sum)
352352
local effect = metric(model.Δ)
353353

354354
extremes = length(null_dist[effect .< abs.(null_dist)])
355-
pvalue = extremes/nsplits
355+
pvalue = extremes/n
356356

357-
stderr = (sum([(effect .- x)^2 for x in null_dist])/(nsplits-1))/sqrt(nsplits)
357+
stderr = (sum([(effect .- x)^2 for x in null_dist])/(n-1))/sqrt(n)
358358

359359
return pvalue, stderr
360360
end

src/metalearners.jl

+28-1
Original file line numberDiff line numberDiff line change
@@ -405,6 +405,19 @@ julia> estimatecausaleffect!(m1)
405405
"""
406406
estimate_causal_effect!(m::Metalearner) = estimate_causal_effect!(m)
407407

408+
"""
409+
stage1!(x)
410+
411+
Estimate the first stage models for an X-learner.
412+
413+
This method should not be called by the user.
414+
415+
```julia-repl
416+
julia> X, Y, T = rand(100, 5), rand(100), [rand()<0.4 for i in 1:100]
417+
julia> m1 = XLearner(X, Y, T)
418+
julia> stage1!(m1)
419+
```
420+
"""
408421
function stage1!(x::XLearner)
409422
if x.regularized
410423
x.g = RegularizedExtremeLearner(x.X, x.T, x.num_neurons, x.activation)
@@ -423,9 +436,23 @@ function stage1!(x::XLearner)
423436
x.ps = predict(x.g, x.X)
424437

425438
# Fit first stage outcome models
426-
fit!(x.μ₀), fit!(x.μ₁)
439+
fit!(x.μ₀); fit!(x.μ₁)
427440
end
428441

442+
"""
443+
stage2!(x)
444+
445+
Estimate the second stage models for an X-learner.
446+
447+
This method should not be called by the user.
448+
449+
```julia-repl
450+
julia> X, Y, T = rand(100, 5), rand(100), [rand()<0.4 for i in 1:100]
451+
julia> m1 = XLearner(X, Y, T)
452+
julia> stage1!(m1)
453+
julia> stage2!(m1)
454+
```
455+
"""
429456
function stage2!(x::XLearner)
430457
d = ifelse(x.T === 0, predict(x.μ₁, x.X .- x.Y), x.Y .- predict(x.μ₀, x.X))
431458

src/model_validation.jl

+5
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,8 @@
1+
"""
2+
Methods to test the sensitivity of interrupted time series estimators, G-computation, double
3+
machine learning, S-learners, T-learners, and X-learners to violations of their modeling
4+
assumptions.
5+
"""
16
module ModelValidation
27

38
using ..Estimators: InterruptedTimeSeries, estimate_causal_effect!, GComputation,

src/models.jl

+35
Original file line numberDiff line numberDiff line change
@@ -300,6 +300,24 @@ function placebo_test(model::ExtremeLearningMachine)
300300
return predict(model, model.X), model.counterfactual
301301
end
302302

303+
"""
304+
ridge_constant(model)
305+
306+
Calculate the L2 penalty for a regularized extreme learning machine.
307+
308+
For more information see:
309+
Li, Guoqiang, and Peifeng Niu. "An enhanced extreme learning machine based on ridge
310+
regression for regression." Neural Computing and Applications 22, no. 3 (2013):
311+
803-810.
312+
313+
Examples
314+
```julia-repl
315+
julia> m1 = RegularizedExtremeLearner(x, y, 10, σ)
316+
Extreme Learning Machine with 10 hidden neurons
317+
julia> ridge_constant(m1)
318+
0.26789338524662887
319+
```
320+
"""
303321
function ridge_constant(model::RegularizedExtremeLearner)
304322
β0 = @fastmath pinv(model.H) * model.Y
305323
σ̃ = @fastmath ((transpose(model.Y .- (model.H * β0)) * (model.Y .- (model.H * β0))) /
@@ -308,6 +326,23 @@ function ridge_constant(model::RegularizedExtremeLearner)
308326
return @fastmath first((model.H[2]*σ̃)/(transpose(β0)*transpose(model.H)*model.H*β0))
309327
end
310328

329+
"""
330+
set_weights_biases(model)
331+
332+
Calculate the weights and biases for an extreme learning machine or regularized extreme
333+
learning machine.
334+
335+
For details see:
336+
Huang, Guang-Bin, Qin-Yu Zhu, and Chee-Kheong Siew. "Extreme learning machine: theory
337+
and applications." Neurocomputing 70, no. 1-3 (2006): 489-501.
338+
339+
Examples
340+
```julia-repl
341+
julia> m1 = RegularizedExtremeLearner(x, y, 10, σ)
342+
Extreme Learning Machine with 10 hidden neurons
343+
julia> set_weights_biases(m1)
344+
```
345+
"""
311346
function set_weights_biases(model::ExtremeLearningMachine)
312347
model.weights = rand(Float64, model.features, model.hidden_neurons)
313348
model.bias = rand(Float64, 1, model.hidden_neurons)

0 commit comments

Comments
 (0)