@@ -387,45 +387,8 @@ AbstractGP currently does not (yet) learn hyperparameters internally. The follow
 
     N_models = size(output_values, 1) #size(transformed_data)[1]
     regularization_noise = gp.alg_reg_noise
-    #= if gp.kernel === nothing
-        println("Using default squared exponential kernel, learning length scale and variance parameters")
-        # Create default squared exponential kernel
-        const_value = 1.0
-        rbf_len = fill(1.0, size(input_values, 2))
-        rbf = const_value * (KernelFunctions.SqExponentialKernel() ∘ ARDTransform(rbf_len))
-        kern = rbf
-        println("Using default squared exponential kernel:", kern)
-    else
-        kern = deepcopy(gp.kernel)
-        println("Using user-defined kernel", kern)
-    end
-
-    if gp.noise_learn
-        # Add white noise to kernel
-        white_noise_level = 1.0
-        white = white_noise_level * KernelFunctions.WhiteKernel()
-        kern += white
-        println("Learning additive white noise")
-    end
-    for i in 1:N_models
-        kernel_i = deepcopy(kern)
-        # In contrast to the GPJL and SKLJL case "data_i = output_values[i, :]"
-        data_i = output_values[i, :]
-        f = AbstractGPs.GP(kernel_i)
-        # f arguments:
-        # input_values: (input_dim * N_dims)
-        fx = f(input_values', regularization_noise)
-        # posterior arguments:
-        # data_i: (N_samples,)
-        post_fx = posterior(fx, data_i)
-        if i == 1
-            println(kernel_i)
-            print(" Completed training of: ")
-        end
-        println(" created GP: ", i)
-    end
-    =#
-    # now obtain the values
+
+    # now obtain the values of the hyperparameters
     if N_models == 1 && !(isa(kernel_params, AbstractVector)) # i.e. just a Dict
         kernel_params_vec = [kernel_params]
     else
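
With the commented-out kernel construction removed, the entries of `kernel_params` become the sole source of hyperparameters. A minimal sketch of how one such entry could be turned into an AbstractGPs posterior, assuming an illustrative `Dict` schema (the `var`/`lengthscale`/`noise` keys and the toy data below are assumptions for illustration, not the schema this package actually uses):

```julia
using AbstractGPs, KernelFunctions

# Hypothetical hyperparameter dictionary; keys are assumed for illustration.
hyps = Dict("var" => 1.0, "lengthscale" => [0.5, 2.0], "noise" => 1e-4)

# Rebuild the ARD squared-exponential kernel from the given values.
# ARDTransform scales inputs, so inverse lengthscales are passed.
kern = hyps["var"] * (SqExponentialKernel() ∘ ARDTransform(1 ./ hyps["lengthscale"]))

# Toy data: inputs stored column-wise (input_dim × N_samples).
X = ColVecs(randn(2, 20))
y = randn(20)

f = AbstractGPs.GP(kern)
fx = f(X, hyps["noise"])      # finite projection with observation noise variance
post_fx = posterior(fx, y)    # exact posterior conditioned on y
```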