@@ -300,6 +300,24 @@ function placebo_test(model::ExtremeLearningMachine)
    return predict(model, model.X), model.counterfactual
end

+ """
304
+ ridge_constant(model)
305
+
306
+ Calculate the L2 penalty for a regularized extreme learning machine.
307
+
308
+ For more information see:
309
+ Li, Guoqiang, and Peifeng Niu. "An enhanced extreme learning machine based on ridge
310
+ regression for regression." Neural Computing and Applications 22, no. 3 (2013):
311
+ 803-810.
312
+
313
+ Examples
314
+ ```julia-repl
315
+ julia> m1 = RegularizedExtremeLearner(x, y, 10, σ)
316
+ Extreme Learning Machine with 10 hidden neurons
317
+ julia> ridge_constant(m1)
318
+ 0.26789338524662887
319
+ ```
320
+ """
function ridge_constant(model::RegularizedExtremeLearner)
    β0 = @fastmath pinv(model.H) * model.Y
    σ̃ = @fastmath ((transpose(model.Y .- (model.H * β0)) * (model.Y .- (model.H * β0))) /
@@ -308,6 +326,23 @@ function ridge_constant(model::RegularizedExtremeLearner)
    return @fastmath first((model.H[2]*σ̃)/(transpose(β0)*transpose(model.H)*model.H*β0))
end

+ """
330
+ set_weights_biases(model)
331
+
332
+ Calculate the weights and biases for an extreme learning machine or regularized extreme
333
+ learning machine.
334
+
335
+ For details see;
336
+ Huang, Guang-Bin, Qin-Yu Zhu, and Chee-Kheong Siew. "Extreme learning machine: theory
337
+ and applications." Neurocomputing 70, no. 1-3 (2006): 489-501.
338
+
339
+ Examples
340
+ ```julia-repl
341
+ julia> m1 = RegularizedExtremeLearner(x, y, 10, σ)
342
+ Extreme Learning Machine with 10 hidden neurons
343
+ julia> set_weights_biases(m1)
344
+ ```
345
+ """
function set_weights_biases(model::ExtremeLearningMachine)
    model.weights = rand(Float64, model.features, model.hidden_neurons)
    model.bias = rand(Float64, 1, model.hidden_neurons)
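
The random weights and biases set here feed the hidden-layer output matrix `H` that the rest of the model (including `ridge_constant`) operates on. Below is a minimal sketch of that mapping, assuming an input matrix `X` and an activation function `σ`; the helper name `hidden_layer_sketch` is illustrative and not part of the package.

```julia
# Illustrative sketch (not part of the commit): form the hidden-layer output H
# from randomly drawn weights and biases, as in a standard extreme learning
# machine (Huang et al., 2006). X, σ, and the helper name are assumptions.
function hidden_layer_sketch(X::AbstractMatrix, hidden_neurons::Int, σ)
    features = size(X, 2)
    W = rand(Float64, features, hidden_neurons)  # input-to-hidden weights
    b = rand(Float64, 1, hidden_neurons)         # one bias per hidden neuron
    return σ.(X * W .+ b)                        # H is n × hidden_neurons
end
```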
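
Likewise, the constant returned by `ridge_constant` is the kind of penalty that enters a ridge (L2-regularized) least-squares fit of the output weights. A minimal sketch under that assumption, with `H`, `Y`, and `k` standing for the hidden-layer matrix, the targets, and the penalty; `ridge_fit_sketch` is a hypothetical helper, not the package's fitting routine.

```julia
using LinearAlgebra  # for the identity operator I

# Illustrative sketch (not part of the commit): ridge-regularized output weights
# β̂ = (HᵀH + kI)⁻¹ HᵀY, in the spirit of Li and Niu (2013). H, Y, and k are
# assumptions standing in for the model's fields and ridge constant.
function ridge_fit_sketch(H::AbstractMatrix, Y::AbstractVector, k::Real)
    return (transpose(H) * H + k * I) \ (transpose(H) * Y)
end
```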