diff --git a/Project.toml b/Project.toml
index 162c2a5..999796d 100755
--- a/Project.toml
+++ b/Project.toml
@@ -19,6 +19,7 @@
 Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a"
 ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
 SignalAnalysis = "df1fea92-c066-49dd-8b36-eace3378ea47"
 SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
+StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
 
 [compat]
 ComponentArrays = "0.14"
diff --git a/src/Amica.jl b/src/Amica.jl
index 39d5ec8..2d062e3 100755
--- a/src/Amica.jl
+++ b/src/Amica.jl
@@ -8,7 +8,7 @@ module Amica
     using ProgressMeter
    using LoopVectorization
    using AppleAccelerate
-
+    using StaticArrays
    #using ComponentArrays
    using Diagonalizations
    using LogExpFunctions
@@ -30,12 +30,14 @@ module Amica
    function Base.show(io::Core.IO,m::SingleModelAmica)
        try
-            global like = m.LL[findlast(m.LL .!= 0)]
+            ix = findlast(.!isnan.(m.LL))
+            @show ix
+            global like = string(m.LL[ix]) * " (after $(string(ix)) iterations)"
        catch
            global like = "not run"
        end
        println(io,"""
-        Amica with:
+        $(typeof(m)) with:
        - signal-size: $(size(m.source_signals))
        - likelihood: $(like)
        """)
diff --git a/src/helper.jl b/src/helper.jl
index b9db918..8bc90eb 100755
--- a/src/helper.jl
+++ b/src/helper.jl
@@ -35,7 +35,7 @@ end
 #taken from amica_a.m
 #L = det(A) * mult p(s|θ)
 function logpfun(rho, y_rho)
-    return @inbounds - y_rho .- log(2) .- loggamma(1 + 1 / rho)
+    return .- y_rho .- log(2) .- loggamma(1 + 1 / rho)
 end
 
@@ -44,19 +44,29 @@ end
     return @inbounds copysign.(optimized_pow(abs.(x), rho - 1), x) .* rho
 end
 
+function ffun!(fp,x,rho)
+    #f = rho * sign(x).*abs(x).^(rho-1);
+    #rho .* (IVM.pow(x, rho - 1), x)
+    for k = eachindex(x)
+        fp[k] = sign(x[k]) * rho * abs(x[k])^(rho-1)
+    end
+#    fp .= sign.(x).*rho .* abs.(x).^(rho-1)
+end
+
+
 # optimized power function for different cpu architectures
 function optimized_pow(lhs::AbstractArray{T, 1}, rhs::T) where {T<:Real}
     optimized_pow(lhs, repeat([rhs], length(lhs)))
 end
 
 function optimized_pow(lhs::AbstractArray{T, 1}, rhs::AbstractArray{T, 1}) where {T<:Real}
-    if Sys.iswindows() || Sys.islinux()
-        return IVM.pow(lhs, rhs)
-    elseif Sys.isapple()
-        return AppleAccelerate.pow(lhs, rhs)
-    else
+#    if Sys.iswindows() || Sys.islinux()
+#        return IVM.pow(lhs, rhs)
+#    elseif Sys.isapple()
+#        return AppleAccelerate.pow(lhs, rhs)
+#    else
         return lhs .^ rhs
-    end
+#    end
 end
 
 function optimized_log(val)
@@ -70,12 +80,21 @@ function optimized_log(val)
 end
 
-function optimized_exp(val)
+function optimized_exp!(val)
     if Sys.iswindows() || Sys.islinux()
-        return IVM.exp(val)
+        IVM.exp!(val)
     elseif Sys.isapple()
-        return AppleAccelerate.exp(val)
+        return AppleAccelerate.exp!(val)
     else
-        return exp.(val)
+        val .= exp.(val)
     end
 end
+
+function optimized_exp(val)
+    #if Sys.iswindows() || Sys.islinux()
+    #    return IVM.exp(val)
+    #elseif Sys.isapple()
+#        return AppleAccelerate.exp(val)
+    #else
+    return exp.(val)
+    #end
+end
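
The rewritten logpfun drops the stray @inbounds but keeps the unit-scale generalized-Gaussian log-density, log p(y) = -|y|^rho - log(2) - loggamma(1 + 1/rho), evaluated from the pre-computed y_rho = |y|^rho. A minimal standalone sanity check of that normalization (QuadGK is assumed here for the integral only; it is not a dependency of this package):

using SpecialFunctions: loggamma
using QuadGK: quadgk   # assumption: used only for this check, not a package dependency

# log p(y) for a unit-scale generalized Gaussian with shape rho, written in terms of
# abs(y)^rho, the quantity logpfun receives as y_rho
logp_gg(y, rho) = -abs(y)^rho - log(2) - loggamma(1 + 1 / rho)

# the density integrates to one for any shape, e.g. Laplace (1), Gaussian-like (2),
# and the flat-topped rho = 6 used by the :gg6 test signal
for rho in (1.0, 2.0, 6.0)
    total, _ = quadgk(y -> exp(logp_gg(y, rho)), -Inf, Inf)
    @assert isapprox(total, 1.0; atol = 1e-8)
end
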
diff --git a/src/likelihood.jl b/src/likelihood.jl
index 1fd6d83..38b5296 100755
--- a/src/likelihood.jl
+++ b/src/likelihood.jl
@@ -24,14 +24,20 @@ function calculate_LL!(myAmica::MultiModelAmica)
 end
 
 #Update loop for Lt and u (which is saved in z). Todo: Rename
-function loopiloop!(myAmica::SingleModelAmica, y_rho)
-    (n,_) = size(myAmica.source_signals)
-
-    for i in 1:n
-        Q = calculate_Q(myAmica,i, y_rho) # myAmica.Q
-        calculate_u!(myAmica,Q,i) # myAmica.z
+function loopiloop!(myAmica::SingleModelAmica{T,ncomps,nmix}, y_rho) where {T,ncomps,nmix}
+    N = size(myAmica.source_signals,2)
+    n = size(myAmica.source_signals,1)
+    Q = Array{T}(undef,nmix,N)
+    gg = myAmica.learnedParameters
+    for comps in 1:n
+        calculate_Q!(Q,gg.proportions[:,comps],gg.scale[:,comps],gg.shape[:,comps],@view(y_rho[comps,:,:]))
+        @debug :Q, Q[1,1],gg.proportions[1,comps],gg.scale[1,comps],gg.shape[1,comps],myAmica.y[comps,1,1] #,@view(y_rho[comps,:,:]))
+        #Q = calculate_Q(myAmica,i, y_rho) # myAmica.Q
+        calculate_u!(myAmica,Q,comps) # myAmica.z
+        @debug "z",myAmica.z[comps,1,1]
         calculate_Lt!(myAmica,Q) # myAmica.Q
     end
+    @debug "lt",myAmica.Lt[[1,end]]
 end
 
 function loopiloop!(myAmica::MultiModelAmica, y_rho)
@@ -39,7 +45,7 @@ function loopiloop!(myAmica::MultiModelAmica, y_rho)
     (n,_) = size(myAmica.models[1].source_signals)
     for h in 1:M #run along models
-        Threads.@threads for i in 1:n #run along components
+        for i in 1:n #run along components
             Q = calculate_Q(myAmica.models[h], i, y_rho)
             calculate_u!(myAmica.models[h], @view(Q[:, n, :]),i)
             calculate_Lt!(myAmica.models[h], @view(Q[:, n, :]))
@@ -47,54 +53,92 @@
         end
     end
 end
 
-@views function calculate_Q(myAmica::SingleModelAmica, i, y_rho)
+function calculate_Q(myAmica::SingleModelAmica, i, y_rho)
     (n,N) = size(myAmica.source_signals)
     m = myAmica.m
     Q = zeros(m,N)
-
-    for j in 1:m
-        Q[j,:] .= log(myAmica.learnedParameters.proportions[j,i]) + 0.5 * log(myAmica.learnedParameters.scale[j,i]) .+ logpfun(myAmica.learnedParameters.shape[j,i], y_rho[i, :, j])
+    gg = myAmica.learnedParameters
+    @views calculate_Q!(Q,gg.proportions[:,i],gg.scale[:,i],gg.shape[:,i],y_rho[i,:,:])
+    return Q
+end
+
+function calculate_Q!(Q,proportions,scale,shape,y_rho)
+    for j in 1:length(proportions)
+        Q[j,:] .= log(proportions[j]) + 0.5 * log(scale[j]) .+ logpfun(shape[j], y_rho[ :, j])
     end
     return Q
 end
+
+#function calculate_Q(myAmica::SingleModelAmica, i, y_rho)
+#    (n,N) = size(myAmica.source_signals)
+#    m = myAmica.m
+#    Q = zeros(m,N)
+#
+#    for k in 1:n
+#        for j in 1:m
+#            Q[j,k] = log(myAmica.learnedParameters.proportions[j,i]) +
+#                     0.5 * log(myAmica.learnedParameters.scale[j,i]) +
+#                     logpfun(myAmica.learnedParameters.shape[j,i], y_rho[i, k, j])
+#        end
+#    end
+#
+#    return Q
+#end
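
calculate_Q! fills row j with log(alpha_j) + 0.5*log(beta_j) + logpfun(rho_j, |y|^rho_j), and calculate_u! then turns these log-scores into mixture responsibilities. The loop form 1/sum(exp.(Q .- Q[j,:]')) is algebraically the softmax over mixtures; a small standalone sketch of that equivalence using the already-imported LogExpFunctions:

using LogExpFunctions: softmax

Q = [-1.2, -0.4, -3.0]   # one sample: log alpha_j + 0.5 log beta_j + logpfun(rho_j, y_rho_j)
z_loop = [1 / sum(exp.(Q .- Q[j])) for j in eachindex(Q)]   # the calculate_u! recurrence
@assert z_loop ≈ softmax(Q)   # identical responsibilities; the j-shift avoids overflow
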
 #calculates u but saves it into z. MultiModel also uses the SingleModel version
-@views function calculate_u!(myAmica::SingleModelAmica, Q, i)
+function calculate_u!(myAmica::SingleModelAmica{T,ncomps,nmix}, Q, i) where {T,ncomps,nmix}
     m = size(myAmica.learnedParameters.scale,1)
+    #scratch_Q = similar(@view(Q[1,:]))
+    #scratch_Q = similar(Q)
+    tmp = Array{T}(undef,size(Q,1),size(Q,2))
     if m > 1
-        for j in 1:m
-            myAmica.z[i, :, j] .= (1 ./ sum(optimized_exp(Q .- Q[j, :]'), dims=1))[:]
+        # i is component/channel
+        ixes = 1:m
+        for j in ixes # 1:3 m mixtures
+            # I think it is very hard to optimize the Q .- Q[j,:] inner term
+#            ix = (ixes)[ixes .∈ 2]
+            tmp .= @view(Q[:,:]) .- @view(Q[j, :])'
+            #tmp .= exp.(tmp)
+            #IVM.exp!(tmp)
+            optimized_exp!(tmp)
+            sum!( @view(myAmica.z[i,:,j])',tmp)
+        end
     end
+    myAmica.z[i,:,:] .= 1 ./ @view(myAmica.z[i,:,:])
 end
 
 #Applies location and scale parameter to source signals (per generalized Gaussian)
 @views function calculate_y!(myAmica::SingleModelAmica)
-    for j in 1:myAmica.m
-        myAmica.y[:,:,j] .= sqrt.(myAmica.learnedParameters.scale[j,:]) .* (myAmica.source_signals[:,:] .- myAmica.learnedParameters.location[j,:])
-    end
-end
-@views function calculate_y!(myAmica::MultiModelAmica)
-    n = size(myAmica.models[1].A,1)
-    for h in 1:size(myAmica.models,1)
-        #=Threads.@threads=# for i in 1:n
-            for j in 1:myAmica.m
-                myAmica.models[h].y[i,:,j] = sqrt(myAmica.models[h].learnedParameters.scale[j,i]) * (myAmica.models[h].source_signals[i,:] .- myAmica.models[h].learnedParameters.location[j,i])
-            end
+    for j in 1:myAmica.m
+        myAmica.y[:,:,j] .= sqrt.(myAmica.learnedParameters.scale[j,:]) .* (myAmica.source_signals .- myAmica.learnedParameters.location[j,:])
     end
-    end
+#    @debug myAmica.y[1,1,1:3],myAmica.learnedParameters.scale[1:3,1], myAmica.learnedParameters.location[1:3,1]
+#    for j in 1:myAmica.m
+#        for i = 1:size(myAmica.source_signals,2)
+#            for k = 1: size(myAmica.source_signals,1)
+#                myAmica.y[k,i,j] = sqrt(myAmica.learnedParameters.scale[j,k]) * (myAmica.source_signals[k,i] - myAmica.learnedParameters.location[j,k])
+#            end
+#        end
+#    end
+
+
+end
+
+function calculate_y!(myAmica::MultiModelAmica)
+    calculate_y!.(myAmica.models)
 end
 
 #Calculates Likelihood for each time sample and for each ICA model
 function calculate_Lt!(myAmica::SingleModelAmica,Q)
     m = size(myAmica.learnedParameters.scale,1)
     if m > 1
-        Qmax = ones(m,1).*maximum(Q,dims=1);
-        myAmica.Lt[:] = myAmica.Lt' .+ Qmax[1,:]' .+ logsumexp(Q - Qmax,dims = 1)
+        myAmica.Lt .+= logsumexp(Q;dims=1)[1,:]
     else
-        myAmica.Lt[:] = myAmica.Lt .+ Q[1,:]#todo: test
+        myAmica.Lt[:] = myAmica.Lt .+ Q[1,:] #todo: test
     end
 end
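
The deleted Qmax shift in calculate_Lt! was a hand-rolled max-subtraction; LogExpFunctions.logsumexp performs the same shift internally, so the one-liner is equally overflow-safe. A standalone sketch of why the naive form fails where logsumexp does not:

using LogExpFunctions: logsumexp

Q = [-1000.0 -2.0; -1001.0 -2.5]      # nmix x N log-scores, first column very small
naive  = log.(sum(exp.(Q), dims = 1)) # exp underflows, column 1 becomes -Inf
stable = logsumexp(Q; dims = 1)       # internally max-shifted, stays finite
@assert isinf(naive[1]) && isfinite(stable[1])
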
diff --git a/src/main.jl b/src/main.jl
index 2adf9aa..4b83f7f 100755
--- a/src/main.jl
+++ b/src/main.jl
@@ -13,9 +13,9 @@ function fit!(amica::AbstractAmica, data; kwargs...)
 end
 
 function amica!(myAmica::AbstractAmica,
-    data;
-    lrate = LearningRate(),
-    shapelrate = LearningRate(;lrate = 0.1,minimum=0.5,maximum=5,init=1.5),
+    data::AbstractMatrix{T};
+    lrate = LearningRate{T}(),
+    shapelrate = LearningRate{T}(;lrate = 0.1,minimum=0.5,maximum=5,init=1.5),
     remove_mean = true,
     do_sphering = true,
     show_progress = true,
@@ -26,9 +26,9 @@ function amica!(myAmica::AbstractAmica,
     update_shape = 1,
     mindll = 1e-8,
 
-    kwargs...)
+    kwargs...) where {T}
 
-    initialize_shape_parameter(myAmica,shapelrate)
+    initialize_shape_parameter!(myAmica,shapelrate)
     (n, N) = size(data)
     m = myAmica.m
@@ -54,26 +54,39 @@ function amica!(myAmica::AbstractAmica,
     lambda = zeros(n, 1)
 
     prog = ProgressUnknown("Minimizing"; showspeed=true)
-    y_rho = zeros(size(myAmica.y))
+    y_rho = similar(myAmica.y)
     for iter in 1:maxiter
+        gg = myAmica.learnedParameters
+        @debug :scale,gg.scale[2],:location,gg.location[2],:proportions,gg.proportions[2],:shape,gg.shape[2]
+        @debug println("")
         #E-step
+        #@show typeof(myAmica.A),typeof(data)
+        @debug :A myAmica.A[1:2,1:2]
+
+        @debug :source_signals myAmica.source_signals[1],myAmica.source_signals[end]
         update_sources!(myAmica, data)
+        @debug :source_signals myAmica.source_signals[1],myAmica.source_signals[end]
         calculate_ldet!(myAmica)
         initialize_Lt!(myAmica)
         myAmica.Lt .+= LLdetS
-        calculate_y!(myAmica)
+
+
+        calculate_y!(myAmica)
+        @debug :y, myAmica.y[1,1:3,1]
         # pre-calculate abs(y)^rho
         for j in 1:m
             for i in 1:n
-                y_rho[i,:,j] .= optimized_pow(abs.(myAmica.y[i,:,j]), myAmica.learnedParameters.shape[j,i])
+                @views y_rho[i,:,j] .= (abs.(myAmica.y[i,:,j]) .^myAmica.learnedParameters.shape[j,i])
             end
         end
 
         loopiloop!(myAmica, y_rho) #Updates y and Lt. Todo: Rename
+
+
         calculate_LL!(myAmica)
@@ -106,8 +119,11 @@ function amica!(myAmica::AbstractAmica,
                 rethrow()
             end
         end
-
+        @debug iter, :A myAmica.A[1:2,1:2]
+
         reparameterize!(myAmica, data)
+
+        @debug iter, :A myAmica.A[1:2,1:2]
 
         #Shows current progress
         show_progress && ProgressMeter.next!(prog; showvalues=[(:LL, myAmica.LL[iter])])
@@ -120,5 +136,6 @@ function amica!(myAmica::AbstractAmica,
     if remove_mean
         add_means_back!(myAmica, removed_mean)
     end
+    @debug myAmica.LL
     return myAmica
 end
\ No newline at end of file
diff --git a/src/parameters.jl b/src/parameters.jl
index ce89883..b61b159 100644
--- a/src/parameters.jl
+++ b/src/parameters.jl
@@ -1,18 +1,12 @@
 #Normalizes source density location parameter (mu), scale parameter (beta) and model centers
-function reparameterize!(myAmica::SingleModelAmica, data)
-    (n,N) = size(myAmica.source_signals)
-    mu = myAmica.learnedParameters.location
-    beta = myAmica.learnedParameters.scale
+function reparameterize!(myAmica::SingleModelAmica, data)
 
-    for i in 1:n
-        tau = norm(myAmica.A[:,i])
-        myAmica.A[:,i] ./= tau
-        mu[:,i] .*= tau
-        beta[:,i] ./= tau^2
-    end
-
-    myAmica.learnedParameters.location .= mu
-    myAmica.learnedParameters.scale .= beta
+    tau = norm.(eachcol(myAmica.A))
+    @debug size(tau) tau
+    myAmica.A = myAmica.A ./ tau'
+    myAmica.learnedParameters.location = myAmica.learnedParameters.location .* tau'
+    myAmica.learnedParameters.scale = myAmica.learnedParameters.scale ./tau'.^2
+
     return myAmica
 end
 
@@ -73,14 +67,16 @@ function calculate_z!(myAmica::MultiModelAmica, i,j,h)
 end
 
 #Updates the Gaussian mixture proportion parameter
-function update_mixture_proportions!(sumz, myAmica::SingleModelAmica,j,i)
+function update_mixture_proportions!(myAmica::SingleModelAmica,sumz)
     N = size(myAmica.source_signals,2)
     if myAmica.m > 1
-        myAmica.learnedParameters.proportions[j,i] = sumz / N
+
+        myAmica.learnedParameters.proportions = sumz' ./ N
     end
 end
 
 function update_mixture_proportions!(sumz, myAmica::MultiModelAmica,j,i,h)
+    error("outdated")
     if myAmica.m > 1
         myAmica.models[h].learnedParameters.proportions[j,i] = sumz / myAmica.ica_weights[h]
     end
@@ -122,6 +118,7 @@ end
 function update_scale(zfp,y,scale,z,shape)
     if shape <= 2
         db = sum(zfp.*y)
+        #@show db
         if db > 0
             scale = scale ./ db
         end
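
The loop-free reparameterize! rescales each column of A to unit norm and compensates in the location and scale parameters, so the modeled distribution of the mixed data is unchanged. A sketch of the equivalence with the deleted per-column loop (names here are illustrative, not package API):

using LinearAlgebra: norm

A, mu, beta = randn(4, 4), randn(3, 4), rand(3, 4)   # 3 mixtures, 4 components
tau = norm.(eachcol(A))                              # one scale per column of A
A2, mu2, beta2 = A ./ tau', mu .* tau', beta ./ tau' .^ 2

@assert all(isapprox.(norm.(eachcol(A2)), 1.0))      # columns renormalized
@assert mu2 ≈ hcat((mu[:, i] .* norm(A[:, i]) for i in 1:4)...)   # matches the old loop
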
@@ -133,22 +130,19 @@ function update_scale(zfp,y,scale,z,shape)
 end
 
 #Sets the initial value for the shape parameter of the GeneralizedGaussians for each Model
-function initialize_shape_parameter(myAmica::SingleModelAmica, shapelrate::LearningRate)
-    myAmica.learnedParameters.shape .= shapelrate.init .*myAmica.learnedParameters.shape
+function initialize_shape_parameter!(myAmica::SingleModelAmica, shapelrate::LearningRate)
+    myAmica.learnedParameters.shape = shapelrate.init .*myAmica.learnedParameters.shape
 end
 
-function initialize_shape_parameter(myAmica::MultiModelAmica, shapelrate::LearningRate)
-    M = size(myAmica.models,1)
-    for h in 1:M
-        myAmica.models[h].learnedParameters.shape .= shapelrate.init .*myAmica.models[h].learnedParameters.shape
-    end
+function initialize_shape_parameter!(myAmica::MultiModelAmica, shapelrate::LearningRate)
+    initialize_shape_parameter!.(myAmica.models, Ref(shapelrate))
 end
 
 #Updates Gaussian mixture Parameters and mixing matrix. todo: rename since it's not a loop for single model
 function update_loop!(myAmica::SingleModelAmica, lambda, y_rho, shapelrate, update_shape, iter, do_newton, newt_start_iter, lrate)
     #Update parameters
     g, kappa = update_parameters!(myAmica, lambda, y_rho, shapelrate, update_shape)
-
+    @debug :g, g[1],:kappa kappa[1]
     #Checks for NaN in parameters before updating the mixing matrix
     if any(isnan, kappa) || any(isnan, myAmica.source_signals) || any(isnan, lambda) || any(isnan, g) || any(isnan, myAmica.learnedParameters.proportions)
         throw(AmicaNaNException())
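
ffun! (added in helper.jl above) writes the negative score of the unit-scale generalized Gaussian that update_parameters! consumes below: -d/dy log p(y) = rho*sign(y)*|y|^(rho-1). A finite-difference spot check of that derivative (standalone sketch, not package code):

logp(y, rho)  = -abs(y)^rho                      # unnormalized; constants drop out
score(y, rho) = rho * sign(y) * abs(y)^(rho - 1) # what ffun! stores into fp

rho, y, h = 1.5, 0.7, 1e-6
fd = -(logp(y + h, rho) - logp(y - h, rho)) / (2h)   # central difference of -logp
@assert isapprox(fd, score(y, rho); atol = 1e-6)
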
@@ -191,16 +185,17 @@ end
 
 #Updates Gaussian mixture parameters. It also returns g, kappa and lambda which are needed to apply the Newton method.
 #Todo: Save g, kappa, lambda in structure, remove return
-@views function update_parameters!(myAmica::SingleModelAmica, lambda, y_rho, lrate_rho::LearningRate, upd_shape)
-    alpha = myAmica.learnedParameters.proportions
-    beta = myAmica.learnedParameters.scale
-    mu = myAmica.learnedParameters.location
-    rho = myAmica.learnedParameters.shape
+@views function update_parameters!(myAmica::SingleModelAmica{T,ncomps,nmix}, lambda, y_rho, lrate_rho::LearningRate, upd_shape) where {T,ncomps,nmix}
+    gg = myAmica.learnedParameters
+    #alpha = myAmica.learnedParameters.proportions
+    #beta = myAmica.learnedParameters.scale
+    #mu = myAmica.learnedParameters.location
+    #rho = myAmica.learnedParameters.shape
     (n,N) = size(myAmica.source_signals)
     m = myAmica.m
-    g = zeros(n,N)
-    kappa = zeros(n,1)
-    zfp = zeros(m, N)
+    g = zeros(T,n,N)
+    kappa = zeros(T,n,1)
+    zfp = zeros(T,m, N)
 
     # update
     # - myAmica.learnedParameters.proportions
@@ -216,11 +211,9 @@ end
         sumz[sumz .< 0] .= 1
         myAmica.z ./= sumz
     end
-    for i in 1:n
-        for j in 1:m
-            update_mixture_proportions!(sumz[i, 1, j],myAmica,j,i)
-        end
-    end
+
+    update_mixture_proportions!(myAmica,sumz[:, 1, :])
+
 
     # update
     # - fp
@@ -238,42 +231,55 @@
     # - alpha
     # - beta
     # - mu
+    kp = similar(gg.shape)
+
+    updated_location = Array(similar(gg.shape)) # convert from StaticMatrix to Mutable
+    updated_scale = Array(similar(gg.shape))
+
+    fp = Vector{T}(undef,N)
     for i in 1:n
         for j in 1:m
-            fp = ffun(myAmica.y[i,:,j], rho[j,i])
+            #@show size(fp), size(myAmica.y)
+            ffun!(fp,myAmica.y[i,:,j], gg.shape[j,i])
             zfp[j,:] .= myAmica.z[i,:,j] .* fp
-            g[i,:] .+= alpha[j,i] .* sqrt(beta[j,i]) .*zfp[j,:]
-
-            kp = beta[j,i] .* sum(zfp[j,:].*fp)
-
-            kappa[i] += alpha[j,i] * kp
-
-            lambda[i] += alpha[j,i] * (sum(myAmica.z[i,:,j].*(fp.*myAmica.y[i,:,j] .-1).^2) + mu[j,i]^2 * kp)
-            mu[j,i] = update_location(myAmica,rho[j,i],zfp[j,:],myAmica.y[i,:,j],mu[j,i],beta[j,i],kp)
-
+            g[i,:] .+= gg.proportions[j,i] .* sqrt(gg.scale[j,i]) .*zfp[j,:]
+
+
+            kp[j,i] = gg.scale[j,i] .* sum(zfp[j,:].*fp)
+
+            kappa[i] += gg.proportions[j,i] * kp[j,i]
+
+            lambda[i] += gg.proportions[j,i] * (sum(myAmica.z[i,:,j].*(fp.*myAmica.y[i,:,j] .-1).^2) + gg.location[j,i]^2 * kp[j,i])
+            @debug fp[1],zfp[j,1],g[i,1],gg.proportions[j,i],gg.scale[j,i], kp[j,i],kappa[i]
+            updated_location[j,i] = update_location(myAmica,gg.shape[j,i],zfp[j,:],myAmica.y[i,:,j],gg.location[j,i],gg.scale[j,i],kp[j,i])
 
-            beta[j,i] = update_scale(zfp[j,:],myAmica.y[i,:,j],beta[j,i],myAmica.z[i,:,j],rho[j,i])
+            @debug :zfp,zfp[j,1],:y,myAmica.y[i,1,j],:z,myAmica.z[i,1,j]
+            updated_scale[j,i] = update_scale(zfp[j,:],myAmica.y[i,:,j],gg.scale[j,i],myAmica.z[i,:,j],gg.shape[j,i])
         end
     end
+
+    #mu = update_location(myAmica,rho,zfp,mu,beta,kp)
+
 
     # update rho
     # depends on rho, zfp, myAmica.y, mu, beta
     if upd_shape == 1
         dr = sum(myAmica.z .* optimized_log(y_rho) .* y_rho, dims=2)
-
+        updated_shape = Array(similar(gg.shape))
         for i in 1:n
             for j in 1:m
-                rho[j, i] = update_shape(rho[j, i], dr[i, 1, j], lrate_rho)
+                updated_shape[j, i] = update_shape(gg.shape[j, i], dr[i, 1, j], lrate_rho)
             end
         end
     end
-    myAmica.learnedParameters.proportions = alpha
-    myAmica.learnedParameters.scale = beta
-    myAmica.learnedParameters.location = mu
-    myAmica.learnedParameters.shape = rho
+    #myAmica.learnedParameters.proportions = alpha
+    myAmica.learnedParameters.scale = updated_scale
+    myAmica.learnedParameters.location = updated_location
+    myAmica.learnedParameters.shape = updated_shape
 
     return g, kappa
 end
@@ -371,6 +377,10 @@ end
 
 #Updates source signal estimations by unmixing the data
 function update_sources!(myAmica::SingleModelAmica, data)
+    #myAmica.source_signals .= myAmica.A \ data#
+    #myAmica.source_signals .= myAmica.A \ data#
+    #ldiv!(myAmica.source_signals,myAmica.A,data)
+
    myAmica.source_signals = pinv(myAmica.A) * data
 end
diff --git a/src/types.jl b/src/types.jl
index b616aba..e1cbd8a 100755
--- a/src/types.jl
+++ b/src/types.jl
@@ -1,94 +1,97 @@
-mutable struct GGParameters{T}
-    proportions::Array{T, 2} #source density mixture proportions
-    scale::Array{T, 2} #source density inverse scale parameter
-    location::Array{T, 2} #source density location parameter
-    shape::Array{T, 2} #source density shape paramters
+mutable struct GGParameters{T,ncomps,nmix}
+    proportions::SMatrix{nmix,ncomps,T} #source density mixture proportions
+    scale::SMatrix{nmix,ncomps,T} #source density inverse scale parameter
+    location::SMatrix{nmix,ncomps,T} #source density location parameter
+    shape::SMatrix{nmix,ncomps,T} #source density shape parameters
 end
 
 abstract type AbstractAmica end
 
-mutable struct SingleModelAmica{T} <:AbstractAmica
+mutable struct SingleModelAmica{T,ncomps,nmix} <:AbstractAmica
     source_signals::Array{T,2}
-    learnedParameters::GGParameters{T}
-    m::Union{Integer, Nothing} #Number of gaussians
-    A::Array{T,2} # unmixing matrices for each model
+    learnedParameters::GGParameters{T,ncomps,nmix}
+    m::Int #Number of gaussians
+    A::SMatrix{ncomps,ncomps,T} # unmixing matrices for each model
     S::Array{T,2} # sphering matrix
     z::Array{T,3}
     y::Array{T,3}
-    centers::Array{T} #model centers
-    Lt::Array{Float64} #log likelihood of time point for each model ( M x N )
-    LL::Array{T} #log likelihood over iterations todo: change to tuple
-    ldet::Float64
+    centers::Array{T,1} #model centers
+    Lt::Array{T,1} #log likelihood of time point for each model ( M x N )
+    LL::Array{T,1} #log likelihood over iterations todo: change to tuple
+    ldet::T
     maxiter::Int
 end
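
With the GGParameters fields (and A) now SMatrix-typed, the mixture sizes are compile-time parameters, but SMatrix is immutable; this is why update_parameters! above builds Array(similar(gg.shape)) scratch buffers and assigns whole fields back. A minimal sketch of that update pattern (illustrative sizes, not the package types):

using StaticArrays

shape = @SMatrix ones(3, 2)        # nmix = 3, ncomps = 2 baked into the type
scratch = Array(shape)             # mutable copy, like Array(similar(gg.shape))
scratch .= 2 .* scratch            # elementwise update is only legal on the Array
shape = SMatrix{3,2}(scratch)      # rebind the (immutable) field with the result
@assert shape isa SMatrix{3,2,Float64}
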
 
-mutable struct MultiModelAmica <:AbstractAmica
-    models::Array{SingleModelAmica} #Array of SingleModelAmicas
+
+mutable struct MultiModelAmica{T} <:AbstractAmica
+    models::Array{SingleModelAmica{T}} #Array of SingleModelAmicas
     normalized_ica_weights #Model weights (normalized)
     ica_weights_per_sample #Model weight for each sample
     ica_weights #Model weight for all samples
     maxiter::Int #Number of iterations
     m::Int #Number of Gaussians
-    LL::AbstractVector #Log-Likelihood
+    LL::Array{T,1} #Log-Likelihood
 end
 
 #Structure for Learning Rate type with initial value, minimum, maximum etc. Used for learning rate and shape lrate
 using Parameters
-@with_kw mutable struct LearningRate
-    lrate::Real = 0.1
-    init::Float64 = 0.1
-    minimum::Float64 = 0.
-    maximum::Float64 = 1.0
-    natural_rate::Float64 = 0.1
-    decreaseFactor::Float64 = 0.5
+@with_kw mutable struct LearningRate{T}
+    lrate::T = 0.1
+    init::T = 0.1
+    minimum::T = 0.
+    maximum::T = 1.0
+    natural_rate::T = 0.1
+    decreaseFactor::T = 0.5
 end
 
 #Data type for AMICA with just one ICA model. todo: rename gg parameters
 function SingleModelAmica(data::AbstractArray{T}; m=3, maxiter=500, A=nothing, location=nothing, scale=nothing, kwargs...) where {T<:Real}
     (n, N) = size(data)
+    ncomps = n
+    nmix = m
 
     #initialize parameters
-    centers = zeros(n)
+    centers = zeros(T,n)
     eye = Matrix(I, n, n)
     if isnothing(A)
-        A = zeros(n,n)
+        A = zeros(T,n,n)
         A[:,:] = eye[n] .+ 0.1*rand(n,n)
         for i in 1:n
             A[:,i] = A[:,i] / norm(A[:,i])
         end
     end
-    proportions = (1/m) * ones(m,n)
+    proportions = (1/m) * ones(T,m,n)
     if isnothing(location)
         if m > 1
-            location = 0.1 * randn(m, n)
+            location = 0.1 * randn(T,m, n)
         else
-            location = zeros(m, n)
+            location = zeros(T,m, n)
         end
     end
     if isnothing(scale)
-        scale = ones(m, n) + 0.1 * randn(m, n)
+        scale = ones(T,m, n) + 0.1 * randn(T,m, n)
     end
-    shape = ones(m, n)
+    shape = ones(T,m, n)
 
-    y = zeros(n,N,m)
+    y = zeros(T,n,N,m)
 
-    Lt = zeros(N)
-    z = ones(n,N,m)/N
+    Lt = zeros(T,N)
+    z = ones(T,n,N,m)/N
 
     #Sets some parameters to nothing if used by MultiModel to only have them once
     if isnothing(maxiter)
         LL = nothing
         m = nothing
     else
-        LL = Float64[]
+        LL = T[]
     end
     ldet = 0.0
-    source_signals = zeros(n,N)
+    source_signals = zeros(T,n,N)
 
-    return SingleModelAmica{T}(source_signals,GGParameters{T}(proportions,scale,location,shape),m,A,I(size(A,1)), z,y,#=Q,=#centers,Lt,LL,ldet,maxiter)
+    return SingleModelAmica{T,ncomps,nmix}(source_signals,GGParameters{T,ncomps,nmix}(proportions,scale,location,shape),m,A,I(size(A,1)), z,y,#=Q,=#centers,Lt,LL,ldet,maxiter)
 end
 
 #Data type for AMICA with multiple ICA models
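
Parameterizing LearningRate on T lets the @with_kw defaults be converted to the data's element type, which is what the LearningRate{T}() defaults in the new amica! signature rely on. A small usage sketch (LR is a stand-in name, not the package struct):

using Parameters

@with_kw mutable struct LR{T}      # stand-in for the package's LearningRate{T}
    lrate::T   = 0.1
    minimum::T = 0.0
    maximum::T = 1.0
end

lr32 = LR{Float32}()               # defaults converted to Float32
lr64 = LR{Float64}(lrate = 0.05)   # keyword override, as for shapelrate
@assert lr32.lrate isa Float32 && lr64.lrate == 0.05
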
diff --git a/test/compare_amica_implementations.jl b/test/compare_amica_implementations.jl
index 5f4c468..b1d8992 100644
--- a/test/compare_amica_implementations.jl
+++ b/test/compare_amica_implementations.jl
@@ -8,8 +8,8 @@ using CairoMakie
 #---
 includet("../../src/simulate_data.jl")
 includet("../../src/fortran_tools.jl")
-n_chan=3
-n_time=10000
+n_chan=10
+n_time=500_000
 x,A,s = simulate_data(;T=50,n_chan,n_time,type=:gg6)
 i_scale,i_location,i_A = init_params(;n_chan,n_time)
@@ -22,36 +22,41 @@
 f
 #---
-maxiter = 200
+maxiter = 50
+
 # matlab run
 mat"""
 tic
 [mA,mc,mLL,mLt,mgm,malpha,mmu,mbeta,mrho] = amica_a($x,1,3,$maxiter,$i_location,$i_scale,$i_A,0);
-toc
+mt = toc
 """
 mat"""
 tic
 [mAopt,mWopt,mSopt,mkhindsopt,mcopt,mLLopt,mLtopt,mgmopt,malphaopt,mmuopt,mbetaopt,mrhoopt] = amica_optimized($x,1,3,$maxiter,1,1,$i_location,$i_scale,$i_A);
-toc
+mtopt = toc
+
 """
 mA = @mget(mA); # get matlab var
 mAopt = @mget(mAopt); # get matlab opt var
+t_mopt = @mget(mtopt); # get matlab var
+t_m = @mget(mt); # get matlab var
+
 # Fortran setup + run
 fortran_setup(Float32.(x);max_threads=1,max_iter=maxiter)
-@time run(`/scratch/projects/fapra_amica/fortran/amica15test julia.param`)
+t_f32 = @elapsed run(`/scratch/projects/fapra_amica/fortran/amica15test julia.param`)
 fA = reshape(reinterpret(Float64,(read("amicaout/A"))),n_chan,n_chan)
 
 fortran_setup(Float64.(x);max_threads=1,max_iter=maxiter,dble_data=1)
-@time run(`/scratch/projects/fapra_amica/fortran/amica15test julia.param`)
+t_f64 = @elapsed run(`/scratch/projects/fapra_amica/fortran/amica15test julia.param`)
 
-#--- Julia run
-am32 = SingleModelAmica(Float32.(x);maxiter=maxiter,A=deepcopy(i_A),location=i_location,scale=i_scale)
-@time fit!(am32,Float32.(x))
+# Julia run
+am32 = SingleModelAmica(Float32.(x);maxiter=maxiter,A=deepcopy(i_A),location=deepcopy(i_location),scale=deepcopy(i_scale))
+t_am32 = @elapsed fit!(am32,Float32.(x))
 
-am64 = SingleModelAmica(Float64.(x);maxiter=maxiter,A=deepcopy(i_A),location=i_location,scale=i_scale)
-@time fit!(am64,Float64.(x))
+am64 = SingleModelAmica(Float64.(x);maxiter=maxiter,A=deepcopy(i_A),location=deepcopy(i_location),scale=deepcopy(i_scale))
+t_am64 = @elapsed fit!(am64,Float64.(x))
 
 #am = SingleModelAmica(Float16.(x);maxiter=maxiter,A=i_A,location=i_location,scale=i_scale)
 #@time fit!(am,x)
 #vcat(@mget(mLL),am.LL')
@@ -62,7 +67,7 @@ series(f2[1,1],inv(am64.A)*x, axis=(;title="unmixed julia64"))
 series(f2[2,1],inv(am32.A)*x, axis=(;title="unmixed julia32"))
 series(f2[3,1],inv(mA)*x, axis=(;title="unmixed matlab"))
 series(f2[4,1],inv(mAopt)*x, axis=(;title="unmixed matlab_optimized"))
-series(f2[5,1],.-inv(fA')*x, axis=(;title="unmixed fortran"))
+series(f2[5,1],inv(fA)*x, axis=(;title="unmixed fortran"))
 series(f2[6,1],s,axis=(;title="original source"))
 series(f2[7,1],x, axis=(;title="original mixed"))
 hidedecorations!.(f2.content)
@@ -73,17 +78,36 @@ f2
 
 #--- compare LLs
+
+visibnoise = 0.02 # add a little bit of noise to distinguish identical lines
 mLL = @mget mLL
 mLLopt = @mget mLLopt
 fLL = reinterpret(Float64,(read("amicaout/LL")))
-f = Figure(size=(1024,1024))
+
+f = Figure(size=(1024,512))
 ax = f[1,1] = Axis(f)
 labels = ["julia64","julia32","matlab", "matlab opt","fortran"]
 for (ix,d) = enumerate([am64.LL, am32.LL,mLL[1,:],mLLopt[1,:],fLL])
-    lines!(ax,d;label=labels[ix])
+    lines!(ax,d .+ rand(size(d)...).*visibnoise;label=labels[ix])
 end
 axislegend(ax)
-ylims!(ax,-2.3,-0.5)
+ylims!(ax,-5,0.1)
+
+
+ax_T = f[1,2] = Axis(f)
+scatter!(ax_T,[t_am32,t_am64,t_m,t_mopt, t_f32,t_f64])
+ax_T.xticks = 1:6
+ax_T.xtickformat = x->["am32","am64","mat","matopt","f32","f64"][Int.(x)]
+
 f
-#--- compare A
+
+#---
+
+
+#----
+am32 = SingleModelAmica(Float32.(x);maxiter=10)#,A=deepcopy(i_A),location=deepcopy(i_location),scale=deepcopy(i_scale))
+@profview fit!(am32,Float32.(x))
+
+fortran_setup(Float32.(x);max_threads=1,max_iter=5)
+t_f32 = @elapsed run(`/scratch/projects/fapra_amica/fortran/amica15test julia.param`)
\ No newline at end of file
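
One caveat for the unmixed-signal comparison above: ICA solutions agree only up to component permutation, sign, and scale, so element-wise comparison of the A matrices is not meaningful on its own. A hedged sketch of aligning two solutions by absolute source correlation before comparing (match_components is a hypothetical helper; it assumes am64.A, mA, and x from the script above, and ties or collapsed components can make perm non-bijective):

using Statistics: cor

# hypothetical helper: pair up components of two unmixing matrices via source correlation
function match_components(W1, W2, x)
    s1, s2 = W1 * x, W2 * x                       # sources under each solution
    C = cor(s1', s2')                             # correlations between all component pairs
    perm  = [argmax(abs.(C[i, :])) for i in axes(C, 1)]
    signs = [sign(C[i, perm[i]]) for i in axes(C, 1)]
    return perm, signs
end

perm, signs = match_components(inv(am64.A), inv(mA), x)   # julia64 vs. matlab run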