diff --git a/.gitignore b/.gitignore
index 4d3d00e..1ca7abe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,7 @@ coverage
docs/build/
env
node_modules
+*.txt
+/*.png
+/*.h5
+*.so
\ No newline at end of file
diff --git a/Project.toml b/Project.toml
index a02bcd3..ecd7cba 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,11 +1,18 @@
name = "UnfoldRIDE"
uuid = "348b5554-d1f6-52bd-94b1-729601d351bf"
-authors = [
- "Till Prölß",
- " René Skukies",
- " Benedikt Ehinger",
-]
+authors = ["Till Prölß", " René Skukies", " Benedikt Ehinger"]
version = "0.1.0"
-[compat]
-julia = "1.10"
+[deps]
+CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+DSP = "717857b8-e6f2-59f4-9121-6e50c889abd2"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964"
+Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
+Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a"
+Peaks = "18e31ff7-3703-566c-8e60-38913d67486b"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+Unfold = "181c99d8-e21b-4ff3-b70b-c233eddec679"
diff --git a/README.md b/README.md
index f810229..9e27979 100644
--- a/README.md
+++ b/README.md
@@ -6,11 +6,62 @@
[](https://github.com/unfoldtoolbox/UnfoldRIDE.jl/actions/workflows/Lint.yml?query=branch%3Amain)
[](https://github.com/unfoldtoolbox/UnfoldRIDE.jl/actions/workflows/Docs.yml?query=branch%3Amain)
[](https://codecov.io/gh/unfoldtoolbox/UnfoldRIDE.jl)
-[](https://doi.org/FIXME)
+
[](CODE_OF_CONDUCT.md)
[](#contributors)
[](https://github.com/JuliaBesties/BestieTemplate.jl)
+A re-implementation of the [RIDE](https://cns.hkbu.edu.hk/RIDE.htm) algorithm in Julia with an extension to replace the RIDEs iterative decomposition with an [Unfold](https://github.com/unfoldtoolbox/Unfold.jl) deconvolution.
+
+## Install
+
+### Installing Julia
+
+
+Click to expand
+
+The recommended way to install julia is [juliaup](https://github.com/JuliaLang/juliaup).
+It allows you to, e.g., easily update Julia at a later point, but also test out alpha/beta versions etc.
+
+TL;DR: If you don't want to read the explicit instructions, just copy the following command
+
+#### Windows
+
+AppStore -> JuliaUp, or `winget install julia -s msstore` in CMD
+
+#### Mac & Linux
+
+`curl -fsSL https://install.julialang.org | sh` in any shell
+
+
+### Installing UnfoldRIDE
+
+```julia
+using Pkg
+Pkg.add("UnfoldRIDE")
+```
+
+## Quickstart
+
+```Julia
+#config for ride algorithm
+cfg = RideConfig(
+ #sfreq is the sampling frequency of the data
+ sfreq = 100,
+ #ranges for the individual components have to be determined through manual inspection of the data
+ s_range = [-0.1, 0.3],
+ r_range = [0, 0.4],
+ c_range = [-0.4, 0.4],
+ #the range in which the initial peak estimation for the C component is performed
+ c_estimation_range = [0, 0.9],
+ #the range for one epoch
+ epoch_range = [-0.1, 1]
+)
+#run the ride algorithm
+resultsClassic = ride_algorithm(ClassicMode, data_noisy, evts_without_c, cfg)
+resultsUnfold = ride_algorithm(UnfoldMode, data_noisy, evts_without_c, cfg)
+```
+
## How to Cite
If you use UnfoldRIDE.jl in your work, please cite using the reference given in [CITATION.cff](https://github.com/unfoldtoolbox/UnfoldRIDE.jl/blob/main/CITATION.cff).
diff --git a/dev/Project.toml b/dev/Project.toml
new file mode 100644
index 0000000..053d298
--- /dev/null
+++ b/dev/Project.toml
@@ -0,0 +1,25 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+DSP = "717857b8-e6f2-59f4-9121-6e50c889abd2"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964"
+Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
+HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
+Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a"
+Peaks = "18e31ff7-3703-566c-8e60-38913d67486b"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
+SignalAnalysis = "df1fea92-c066-49dd-8b36-eace3378ea47"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+Unfold = "181c99d8-e21b-4ff3-b70b-c233eddec679"
+UnfoldSim = "ed8ae6d2-84d3-44c6-ab46-0baf21700804"
+UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
+
+[sources]
+UnfoldSim = {rev = "v4.0", url = "https://github.com/unfoldtoolbox/UnfoldSim.jl"}
diff --git a/dev/create_sysimage.jl b/dev/create_sysimage.jl
new file mode 100644
index 0000000..64494a6
--- /dev/null
+++ b/dev/create_sysimage.jl
@@ -0,0 +1,27 @@
+# This script creates a sysimage for the RIDE project
+# WARNING: This will take a LONG time (1-2 hours)
+# The sysimage massively speeds up julia startup time
+# After the sysimage is created, you need to add:
+# "-JRIDE_Sysimage.so" to your "julia.additionalArgs" in your vscode settings
+# You can use Base.loaded_modules to confirm that the sysimage is loaded correctly
+
+include("./runner.jl")
+# Get all packages from the current project
+all_packages = String[]
+for (uuid, dep) in Pkg.dependencies()
+ dep.is_direct_dep || continue
+ dep.version === nothing && continue
+ push!(all_packages, dep.name)
+end
+
+# Remove unneeded packages
+do_not_include = ["PackageCompiler"]
+package_list = filter(x -> x ∉ do_not_include, all_packages)
+
+using PackageCompiler
+
+PackageCompiler.create_sysimage(
+ package_list;
+ sysimage_path = "RIDE_Sysimage.so",
+ precompile_execution_file = "./dev/runner.jl",
+)
\ No newline at end of file
diff --git a/dev/data/matlab_ride_samp_face.h5 b/dev/data/matlab_ride_samp_face.h5
new file mode 100644
index 0000000..a8e2e8e
Binary files /dev/null and b/dev/data/matlab_ride_samp_face.h5 differ
diff --git a/dev/plotting_methods.jl b/dev/plotting_methods.jl
new file mode 100644
index 0000000..b1ef8c2
--- /dev/null
+++ b/dev/plotting_methods.jl
@@ -0,0 +1,78 @@
+function plot_c_latency_estimation_four_epochs(data_epoched, c_latencies, c_erp)
+ #plotting the estimated c latencies on a couple of epochs
+ f = Figure()
+ c_latency_from_epoch_start = c_latencies .- round(Int, cfg.epoch_range[1] * cfg.sfreq)
+ for a = 1:2, b = 1:2
+ i = (a - 1) * 2 + b
+ Axis(f[a, b], title = "Estimated C latency epoch $i")
+ lines!(f[a, b], data_epoched[1, :, i]; color = "black")
+ y = c_latency_from_epoch_start[i]:(c_latency_from_epoch_start[i]+length(c_erp)-1)
+ lines!(f[a, b], y, c_erp; color = "red")
+ end
+ return f
+end
+
+function plot_first_three_epochs_of_raw_data(data, evts)
+ f = Figure()
+ Axis(f[1, 1], title = "First three epochs")
+ evts_s = @subset(evts, :event .== 'S')
+ evts_r = @subset(evts, :event .== 'R')
+ graph = lines!(first(vec(data), evts_s.latency[4]); color = "black")
+ graph_r = vlines!(first(evts_r.latency, 3), color = "blue")
+ graph_s = vlines!(first(evts_s.latency, 3), color = "red")
+
+ Legend(
+ f[1, 2],
+ [graph, graph_r, graph_s],
+ ["Data", "Reaction Times", "Stimulus Onsets"],
+ )
+ display(f)
+end
+
+function plot_first_three_epochs_of_raw_data(data)
+ f = Figure()
+ Axis(f[1, 1], title = "First 600 data points")
+ graph = lines!(first(vec(data), 600); color = "black")
+ Legend(f[1, 2], [graph], ["Data"])
+ display(f)
+end
+
+function plot_interim_results(data, evts, results, cfg)
+ evts_s = @subset(evts, :event .== 'S')
+ data_epoched =
+ Unfold.epoch(data = data, tbl = evts_s, τ = cfg.epoch_range, sfreq = cfg.sfreq)[1]
+ data_epoched = Unfold.drop_missing_epochs(evts_s, data_epoched)[2]
+ raw_erp = mean(data_epoched, dims = 3)[1, :, 1]
+ for (i, r) in enumerate(vcat(results.interim_results, results))
+ f = plot_c_latency_estimation_four_epochs(
+ data_epoched,
+ r.c_latencies,
+ r.c_erp_unpadded,
+ )
+ Label(f[0, :], text = "Estimated C latency, Iteration $(i-1)", halign = :center)
+ display(f)
+ end
+ for (i, r) in enumerate(vcat(results.interim_results, results))
+ f = plot_simple_results(
+ raw_erp,
+ r.s_erp,
+ r.r_erp,
+ r.c_erp;
+ legend = true,
+ title = "Calculated Components, Iteration $(i-1)",
+ )
+ display(f)
+ end
+end
+
+function plot_simple_results(raw_erp, s, r, c; legend = false, f = Figure(), title = "")
+ ax = Axis(f[1, 1], yticks = -100:100, title = title)
+ raw = lines!(raw_erp; color = "black", linewidth = 3, label = "ERP")
+ s = lines!(s; color = "blue", label = "S")
+ c = lines!(c; color = "red", label = "C")
+ r = lines!(r; color = "green", label = "R")
+ if legend
+ axislegend(ax)
+ end
+ return f
+end
\ No newline at end of file
diff --git a/dev/runner.jl b/dev/runner.jl
new file mode 100644
index 0000000..062c08d
--- /dev/null
+++ b/dev/runner.jl
@@ -0,0 +1,130 @@
+using Pkg
+Pkg.activate("./dev")
+
+using Revise
+includet("../src/UnfoldRIDE.jl")
+includet("../test/simulate_test_data.jl")
+includet("./plotting_methods.jl")
+using .UnfoldRIDE
+using CairoMakie
+using StableRNGs
+using BenchmarkTools
+#simulate data
+begin
+ sim_inputs = simulation_inputs()
+ sim_inputs.noise = PinkNoise(; noiselevel = 1)
+ #sim_inputs.c_width = 30
+ #sim_inputs.c_offset = 10
+ #sim_inputs.r_width = 60
+ #sim_inputs.r_offset = 15
+ #sim_inputs.c_beta = -4
+ #sim_inputs.r_beta = 6
+ #sim_inputs.r_offset = 0
+ #sim_inputs.c_beta = -5
+ #sim_inputs.c_offset = 50
+ #sim_inputs.s_offset = 70
+ data, evts, data_clean, evts_clean, data_clean_s, data_clean_r, data_clean_c =
+ simulate_default_plus_clean(sim_inputs)
+ plot_first_three_epochs_of_raw_data(data_clean_s, evts)
+ plot_first_three_epochs_of_raw_data(data, evts)
+end
+
+#run the ride algorithm on the simulated data
+begin
+ #ENV["JULIA_DEBUG"] = "UnfoldRIDE"
+ #config for ride algorithm
+ cfg = RideConfig(
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4],
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 5,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = true,
+ )
+
+ save_to_hdf5_ride_format(
+ "./dev/data/simulated_data.h5",
+ data,
+ evts,
+ cfg.epoch_range,
+ 'S',
+ 'R',
+ cfg.sfreq,
+ )
+
+ #remove the C events from the evts table, these will be estimated by the ride algorithm
+ evts_without_c = @subset(evts, :event .!= 'C')
+
+ #run the ride algorithm
+ results = ride_algorithm(ClassicMode, data, evts_without_c, cfg)
+ results2 = ride_algorithm(UnfoldMode, data, evts_without_c, cfg)
+ s_erp = results[1].s_erp
+ r_erp = results[1].r_erp
+ c_erp = results[1].c_erp
+ c_latencies = results[1].c_latencies
+
+ plot_interim_results(data, evts, results[1], cfg)
+end
+
+# calculate and plot clean erps from the simulated data
+# these represent the optimal result from the algorithm
+begin
+ evts_clean_s = @subset(evts_clean, :event .== 'S')
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_s,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_s = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_c,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_c = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_r,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_r = mean(data_epoched_clean, dims = 3)
+
+ #plot the results
+ f = Figure()
+ Axis(f[1, 1], yticks = -100:100)
+ s = lines!(f[1, 1], erp_clean_s[1, :, 1], color = "blue")
+ c = lines!(f[1, 1], erp_clean_c[1, :, 1], color = "red")
+ r = lines!(f[1, 1], erp_clean_r[1, :, 1], color = "green")
+ Legend(f[1, 2], [s, r, c], ["S", "R", "C"])
+ Label(f[0, :], text = "Expected Results")
+ display(f)
+ save("actual_erps.png", f)
+end
+
+if true == false
+ @benchmark ride_algorithm(UnfoldMode, data, evts_without_c, cfg)
+
+ @benchmark ride_algorithm(ClassicMode, data, evts_without_c, cfg)
+end
\ No newline at end of file
diff --git a/dev/runner_matlab_samp.jl b/dev/runner_matlab_samp.jl
new file mode 100644
index 0000000..4e32c7a
--- /dev/null
+++ b/dev/runner_matlab_samp.jl
@@ -0,0 +1,110 @@
+import Pkg
+Pkg.activate("./dev")
+
+using Revise
+includet("../src/UnfoldRIDE.jl")
+includet("../test/simulate_test_data.jl")
+includet("./plotting_methods.jl")
+using .UnfoldRIDE
+using CairoMakie
+using StableRNGs
+
+
+#import data from hdf5. Created for use with the sample data exported from matlab
+function import_data_from_hdf5(file_path, channel)
+ #imported data is in matlab format: timestep, channel, epoch
+ import_data = h5read(file_path, "/dataset_data")
+ rt = h5read(file_path, "/dataset_rt")
+
+ data = Vector{Float64}()
+ #fill the ride_matrix with the data
+ offset = 800
+ for i = 1:offset
+ push!(data, 0)
+ end
+ for x in axes(import_data, 3)
+ for y in axes(import_data, 1)
+ push!(data, import_data[y, channel, x])
+ end
+ for i = 1:offset
+ push!(data, 0)
+ end
+ end
+ s =
+ Vector(1:length(rt)) .* (offset + size(import_data, 1)) .- size(import_data, 1) .+
+ 50
+ rt = Int.(round.(rt ./ 2))
+ rt = rt .+ s
+ evts = DataFrame()
+ for i in eachindex(rt)
+ push!(evts, (event = 'S', latency = s[i]))
+ push!(evts, (event = 'R', latency = rt[i]))
+ end
+
+ return data, evts
+end
+
+#import data
+begin
+ path = "dev/data/matlab_ride_samp_face.h5"
+ data_channels_vector = Vector()
+ for i = 1:65
+ data_channel1 = reshape(import_data_from_hdf5(path, i)[1], (1, :))
+ push!(data_channels_vector, data_channel1)
+ end
+ data = reduce(vcat, data_channels_vector)
+ evts = import_data_from_hdf5(path, 1)[2]
+ @assert(evts == import_data_from_hdf5(path, 2)[2])
+end
+
+#run the ride algorithm on the simulated data
+begin
+ #config for ride algorithm
+ cfg = RideConfig(
+ sfreq = 1,
+ s_range = [0, 250],
+ r_range = [-150, 150],
+ c_range = [-200, 200],
+ c_estimation_range = [0, 400],
+ epoch_range = [-49, 500],
+ iteration_limit = 4,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = false,
+ )
+
+ #run the ride algorithm
+ results = ride_algorithm(UnfoldMode, reshape(data[44, :], (1, :)), evts, cfg)
+ plot_interim_results(data[44, :], evts, results[1], cfg)
+end
+
+#benchmark all channels
+using BenchmarkTools
+if true == false
+ @benchmark ride_algorithm(UnfoldMode, data, evts, cfg)
+end
+
+
+#test that the erp calculated on the original imported data and
+#the erp calculated after concatenation and unfold.epoch() is identical
+if true == false
+ channel = 44
+ file_path = "dev/data/matlab_ride_samp_face.h5"
+
+ data, evts = import_data_from_hdf5(file_path, channel)
+ import_data = h5read(file_path, "/dataset_data")
+
+ orig_erp = median(import_data[:, channel, :], dims = 2)
+ f = lines(orig_erp[:, 1], color = :red, linewidth = 3)
+
+ evts_s = @subset(evts, :event .== 'S')
+ new_data_epoched = Unfold.epoch(data = data, tbl = evts_s, τ = [-49, 500], sfreq = 1)[1]
+ n, new_data_epoched = Unfold.drop_missing_epochs(evts_s, new_data_epoched)
+ new_erp = median(new_data_epoched, dims = 3)
+
+ lines!(new_erp[1, :, 1], color = :blue)
+ display(f)
+
+ @assert(orig_erp[:, 1] == new_erp[1, :, 1])
+end
\ No newline at end of file
diff --git a/dev/runner_multichannel.jl b/dev/runner_multichannel.jl
new file mode 100644
index 0000000..eb70e71
--- /dev/null
+++ b/dev/runner_multichannel.jl
@@ -0,0 +1,68 @@
+using Pkg
+Pkg.activate("./dev")
+
+using Revise
+includet("../src/UnfoldRIDE.jl")
+includet("../test/simulate_test_data.jl")
+includet("./plotting_methods.jl")
+using .UnfoldRIDE
+using CairoMakie
+using StableRNGs
+using BenchmarkTools
+#simulate data
+begin
+ sim_inputs = simulation_inputs()
+ #sim_inputs.noise = PinkNoise()
+ data, evts, data_clean, evts_clean, data_clean_s, data_clean_r, data_clean_c =
+ simulate_default_plus_clean(sim_inputs)
+end
+
+begin
+ #ENV["JULIA_DEBUG"] = "UnfoldRIDE"
+ #config for ride algorithm
+ cfg = RideConfig(
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4], # change to -0.4 , 0.4 or something because it's attached to the latency of C
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 4,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = true,
+ )
+
+ channels = 3
+ data_channels_vector = Vector()
+ noise = PinkNoise(; noiselevel = 1)
+ for i = 1:channels
+ data_channel1 = reshape(deepcopy(data), (1, :))
+ UnfoldSim.add_noise!(MersenneTwister(i), noise, data_channel1)
+ push!(data_channels_vector, data_channel1)
+ end
+ data_channels = reduce(vcat, data_channels_vector)
+
+
+
+ for i in axes(data_channels, 1)
+ plot_first_three_epochs_of_raw_data(reshape(data_channels[i, :], (1, :)), evts)
+ end
+
+
+ #remove the C events from the evts table, these will be estimated by the ride algorithm
+ evts_without_c = @subset(evts, :event .!= 'C')
+
+ #run the ride algorithm
+ results = ride_algorithm(UnfoldMode, data_channels, evts_without_c, cfg)
+ for i in axes(results, 1)
+ plot_interim_results(reshape(data_channels[i, :], (1, :)), evts, results[i], cfg)
+ end
+end
+
+if true == false
+ @benchmark ride_algorithm(ClassicMode, data_channels, evts_without_c, cfg)
+
+ @benchmark ride_algorithm(UnfoldMode, data_channels, evts_without_c, cfg)
+end
\ No newline at end of file
diff --git a/docs/code/Project.toml b/docs/code/Project.toml
new file mode 100644
index 0000000..e3562a4
--- /dev/null
+++ b/docs/code/Project.toml
@@ -0,0 +1,14 @@
+[deps]
+CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964"
+Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+Unfold = "181c99d8-e21b-4ff3-b70b-c233eddec679"
+UnfoldRIDE = "348b5554-d1f6-52bd-94b1-729601d351bf"
+UnfoldSim = "ed8ae6d2-84d3-44c6-ab46-0baf21700804"
+UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
+
+[sources]
+UnfoldRIDE = {rev = "main", url = "https://github.com/unfoldtoolbox/UnfoldRIDE.jl"}
+UnfoldSim = {rev = "v4.0", url = "https://github.com/unfoldtoolbox/UnfoldSim.jl"}
diff --git a/docs/code/running_ride.jl b/docs/code/running_ride.jl
new file mode 100644
index 0000000..ba68d59
--- /dev/null
+++ b/docs/code/running_ride.jl
@@ -0,0 +1,52 @@
+#using UnfoldRIDE
+using Statistics
+using Unfold
+
+include("simulate_variable_latency_sequence.jl")
+
+#add some noise to the simulated data
+data_noisy = copy(data)
+UnfoldSim.add_noise!(MersenneTwister(1234), PinkNoise(; noiselevel = 0.9), data_noisy)
+
+#run the ride algorithm on the simulated data
+begin
+ #config for ride algorithm
+ cfg = RideConfig(
+ #sfreq is the sampling frequency of the data
+ sfreq = 100,
+ #ranges for the individual components are determined by manually inspecting the data
+ s_range = [-0.1, 0.3],
+ r_range = [0, 0.4],
+ c_range = [-0.4, 0.4],
+ #the range in which the initial peak estimation for the C component is performed
+ c_estimation_range = [0, 0.9],
+ #the range for one epoch
+ epoch_range = [-0.1, 1],
+ )
+    #run the ride algorithm, first in classic, then in unfold mode.
+ #We only have one channel, so we only need the first entry from the results vector.
+ resultsClassic = ride_algorithm(ClassicMode, data_noisy, evts_without_c, cfg)[1]
+ resultsUnfold = ride_algorithm(UnfoldMode, data_noisy, evts_without_c, cfg)[1]
+end
+
+#plot the results
+begin
+ f = Figure(size = (1000, 400))
+
+ #plot classic results
+ ax = Axis(f[1, 1], yticks = -100:100, title = "Classic RIDE")
+ raw = lines!(resultsClassic.raw_erp; color = "black", linewidth = 3, label = "ERP")
+ s = lines!(resultsClassic.s_erp; color = "blue", label = "S")
+ c = lines!(resultsClassic.c_erp; color = "red", label = "C")
+ r = lines!(resultsClassic.r_erp; color = "green", label = "R")
+
+ #plot unfold results
+ ax = Axis(f[1, 2], yticks = -100:100, title = "Unfold RIDE")
+ raw = lines!(resultsUnfold.raw_erp; color = "black", linewidth = 3, label = "ERP")
+ s = lines!(resultsUnfold.s_erp; color = "blue", label = "S")
+ c = lines!(resultsUnfold.c_erp; color = "red", label = "C")
+ r = lines!(resultsUnfold.r_erp; color = "green", label = "R")
+ axislegend(ax)
+
+ display(f)
+end
\ No newline at end of file
diff --git a/docs/code/simulate_variable_latency_sequence.jl b/docs/code/simulate_variable_latency_sequence.jl
new file mode 100644
index 0000000..66f66fc
--- /dev/null
+++ b/docs/code/simulate_variable_latency_sequence.jl
@@ -0,0 +1,133 @@
+using UnfoldSim
+using Parameters
+using Random
+using CairoMakie
+using DataFramesMeta
+
+@with_kw struct SequenceOnset <: AbstractOnset
+ stimulus_onset::AbstractOnset
+ components_onset::Vector{AbstractOnset}
+end
+
+function UnfoldSim.simulate_onsets(rng, onset::SequenceOnset, simulation::Simulation)
+ #calculate stimulus onsets
+ stimulus_onsets =
+ simulate_interonset_distances(rng, onset.stimulus_onset, simulation.design)
+ stimulus_offset_accumulated = accumulate(+, stimulus_onsets, dims = 1, init = 0)
+
+ #calculate component offsets
+ components_onsets = Vector{Vector{Int}}()
+ for obj in onset.components_onset
+ Random.seed!(rng, rand(rng, 1:10000))
+ push!(components_onsets, simulate_interonset_distances(rng, obj, simulation.design))
+ end
+
+ #combine the stimulus offsets and component offsets into one vector
+ result = Vector{Int}()
+ for i in axes(stimulus_offset_accumulated, 1)
+ current_offset = stimulus_offset_accumulated[i]
+ push!(result, current_offset)
+ for component_onsets in components_onsets
+ push!(result, current_offset + component_onsets[i])
+ end
+ end
+
+ #cut result to the design size
+ result = result[1:size(simulation.design)]
+ return result
+end
+
+#Define the design
+design =
+ SingleSubjectDesign(; conditions = Dict(:cond => ["car", "face"])) |>
+ x -> RepeatDesign(x, 40)
+
+#Create a sequence design with three components (S, C, R)
+sequence_design = SequenceDesign(design, "SCR")
+
+# Define the components
+s_component_1 =
+ LinearModelComponent(; basis = vcat(p100()), formula = @formula(0 ~ 1), β = [1])
+s_component_2 =
+ LinearModelComponent(; basis = n170(), formula = @formula(0 ~ 1 + cond), β = [1, 0])
+c_component =
+ LinearModelComponent(; basis = p100(), formula = @formula(0 ~ 1 + cond), β = [-4, 2])
+r_component =
+ LinearModelComponent(; basis = p100(), formula = @formula(0 ~ 1 + cond), β = [6, 0])
+
+#offset of the stimulus defines the distance between two epochs
+#offsets for the components are applied from the stimulus due to the custom simulate_onsets method
+#the latencies for the first three epochs would be:
+# stimulus = 100, 200, 300
+# c = 110:140, 210:240, 310:340
+# r = 120:160, 220:260, 320:360
+onset_stimulus = UniformOnset(width = 0, offset = 100)
+onset_c = UniformOnset(width = 30, offset = 10)
+onset_r = UniformOnset(width = 40, offset = 20)
+sequence_onset = SequenceOnset(onset_stimulus, [onset_c, onset_r])
+#the components dict has to be consistent with the sequence design, i.e. contain S, C, R
+components =
+ Dict('S' => [s_component_1, s_component_2], 'C' => [c_component], 'R' => [r_component])
+
+#simulate the data
+data, evts = simulate(
+ MersenneTwister(7),
+ sequence_design,
+ components,
+ sequence_onset,
+ PinkNoise(noiselevel = 0.1),
+)
+
+#only keep the S and R events, the C events will be calculated by the RIDE algorithm
+evts_without_c = @subset(evts, :event .== 'S' .|| :event .== 'R')
+
+#plotting
+begin
+ f = Figure(size = (1000, 400))
+ ax = Axis(
+ f[1, 1],
+ title = "Simulated EEG data",
+ titlesize = 18,
+ xlabel = "Time [samples]",
+ ylabel = "Amplitude [µV]",
+ xlabelsize = 16,
+ ylabelsize = 16,
+ xgridvisible = false,
+ ygridvisible = false,
+ limits = ((190, 700), nothing),
+ )
+
+ lines!(data; color = "black")
+
+ #plot the event onsets
+ evts_s = @subset(evts, :event .== 'S')
+ evts_c = @subset(evts, :event .== 'C')
+ evts_r = @subset(evts, :event .== 'R')
+
+ vlines!(
+ ax,
+ evts_s.latency,
+ color = "red",
+ linestyle = :dash,
+ linewidth = 2,
+ label = "Stimulus",
+ )
+ vlines!(
+ ax,
+ evts_c.latency,
+ color = "green",
+ linestyle = :dash,
+ linewidth = 2,
+ label = "C",
+ )
+ vlines!(
+ ax,
+ evts_r.latency,
+ color = "blue",
+ linestyle = :dash,
+ linewidth = 2,
+ label = "R",
+ )
+ axislegend("Event onset"; unique = true)
+ display(f)
+end
\ No newline at end of file
diff --git a/docs/images/classicAndUnfoldTutorialResults.png b/docs/images/classicAndUnfoldTutorialResults.png
new file mode 100644
index 0000000..ad02ce7
Binary files /dev/null and b/docs/images/classicAndUnfoldTutorialResults.png differ
diff --git a/docs/images/simulated_EEG_tutorial.png b/docs/images/simulated_EEG_tutorial.png
new file mode 100644
index 0000000..ab6ef25
Binary files /dev/null and b/docs/images/simulated_EEG_tutorial.png differ
diff --git a/docs/make.jl b/docs/make.jl
index 05d37a3..6904adb 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -18,4 +18,7 @@ makedocs(;
pages = ["index.md"; numbered_pages],
)
-deploydocs(; repo = "github.com/unfoldtoolbox/UnfoldRIDE.jl")
+deploydocs(;
+ repo = "github.com/unfoldtoolbox/UnfoldRIDE.jl",
+ push_preview = true,
+ devbranch = "main")
diff --git a/docs/src/10-data_simulation.md b/docs/src/10-data_simulation.md
new file mode 100644
index 0000000..8da388c
--- /dev/null
+++ b/docs/src/10-data_simulation.md
@@ -0,0 +1,173 @@
+# Simulation with Variable Latency Components
+
+You can find the code for this tutorial [here](../code/simulate_variable_latency_sequence.jl), using this [Project.toml](../code/Project.toml).
+
+
+
+To properly run the RIDE algorithm, we need a dataset including at least one component with a variable latency. [UnfoldSim](https://github.com/unfoldtoolbox/UnfoldSim.jl/tree/main) can be used to generate the EEG data and the ```SequenceDesign``` allows us to define a sequence of components with one shared onset.
+
+To simulate a variable latency, we need a bit more control over the onset of each component. The following code creates a new ```SequenceOnset```, which allows us to define one onset for the stimulus and individual onsets for each component. In addition to defining individual onsets, we also modify the ```simulate_onsets``` function to apply all our component onsets from the stimulus onset. The default behaviour would be to simply apply the onsets from the previous component, which wouldn't make any sense for this scenario.
+
+```julia
+@with_kw struct SequenceOnset <: AbstractOnset
+ stimulus_onset::AbstractOnset
+ components_onset::Vector{AbstractOnset}
+end
+
+function UnfoldSim.simulate_onsets(rng, onset::SequenceOnset, simulation::Simulation)
+ #calculate stimulus onsets
+ stimulus_onsets =
+ simulate_interonset_distances(rng, onset.stimulus_onset, simulation.design)
+ stimulus_offset_accumulated = accumulate(+, stimulus_onsets, dims = 1, init = 0)
+
+ #calculate component offsets
+ components_onsets = Vector{Vector{Int}}()
+ for obj in onset.components_onset
+ Random.seed!(rng, rand(rng, 1:10000))
+ push!(components_onsets, simulate_interonset_distances(rng, obj, simulation.design))
+ end
+
+ #combine the stimulus offsets and component offsets into one vector
+ result = Vector{Int}()
+ for i in axes(stimulus_offset_accumulated, 1)
+ current_offset = stimulus_offset_accumulated[i]
+ push!(result, current_offset)
+ for component_onsets in components_onsets
+ push!(result, current_offset + component_onsets[i])
+ end
+ end
+
+ #cut result to the design size
+ result = result[1:size(simulation.design)]
+ return result
+end
+```
+
+
+In RIDE, we generally differentiate between three different component clusters: S, C, and R:
+- S represents the Stimulus
+- R represents a response to the Stimulus with a known variable latency
+- C represents a response to the Stimulus with an unknown variable latency
+
+We define components for each of these component clusters and use [UnfoldSim](https://github.com/unfoldtoolbox/UnfoldSim.jl/tree/main) to model them. If there are questions about the next code section, we recommend checking out the [UnfoldSim documentation](https://unfoldtoolbox.github.io/UnfoldSim.jl/stable/generated/tutorials/quickstart/#Specify-the-simulation-ingredients).
+
+```julia
+#Define the design
+design =
+ SingleSubjectDesign(; conditions = Dict(:cond => ["car", "face"])) |>
+ x -> RepeatDesign(x, 4)
+
+#Create a sequence design with three components (S, C, R)
+sequence_design = SequenceDesign(design, "SCR")
+
+# Define the components
+s_component_1 =
+ LinearModelComponent(; basis = vcat(p100()), formula = @formula(0 ~ 1), β = [2])
+s_component_2 =
+ LinearModelComponent(; basis = n170(), formula = @formula(0 ~ 1 + cond), β = [2, 0])
+c_component =
+ LinearModelComponent(; basis = p100(), formula = @formula(0 ~ 1 + cond), β = [-4, 2])
+r_component =
+ LinearModelComponent(; basis = p100(), formula = @formula(0 ~ 1 + cond), β = [6, 0])
+
+#offset of the stimulus defines the distance between two epochs
+#offsets for the components are applied from the stimulus due to the custom simulate_onsets method
+#the latencies for the first three epochs would be:
+# stimulus = 100, 200, 300
+# c = 110:140, 210:240, 310:340
+# r = 120:160, 220:260, 320:360
+onset_stimulus = UniformOnset(width = 0, offset = 100)
+onset_c = UniformOnset(width = 30, offset = 10)
+onset_r = UniformOnset(width = 40, offset = 20)
+sequence_onset = SequenceOnset(onset_stimulus, [onset_c, onset_r])
+#the components dict has to be consistent with the sequence design, i.e. contain S, C, R
+components =
+ Dict('S' => [s_component_1, s_component_2], 'C' => [c_component], 'R' => [r_component])
+
+#simulate the data
+data, evts = simulate(
+ MersenneTwister(7),
+ sequence_design,
+ components,
+ sequence_onset,
+ PinkNoise(noiselevel = 0.1),
+)
+```
+
+Running the code generates the following data. The noise is set to a relatively low 0.1 compared to the max amplitude of 6, which clearly shows the individual components. Note how the C and R component always appear after the Stimulus, but the order of C and R isn't consistent. It is entirely determined by our onset definitions in the previous code segment.
+
+
+Code used for Graph Creation
+
+```julia
+#plotting
+begin
+ f = Figure(size = (1000, 400))
+ ax = Axis(
+ f[1, 1],
+ title = "Simulated EEG data",
+ titlesize = 18,
+ xlabel = "Time [samples]",
+ ylabel = "Amplitude [µV]",
+ xlabelsize = 16,
+ ylabelsize = 16,
+ xgridvisible = false,
+ ygridvisible = false,
+ limits = ((90, 390), nothing),
+ )
+
+ lines!(data; color = "black")
+
+ #plot the event onsets
+ evts_s = @subset(evts, :event .== 'S')
+ evts_c = @subset(evts, :event .== 'C')
+ evts_r = @subset(evts, :event .== 'R')
+
+ vlines!(
+ ax,
+ evts_s.latency,
+ color = "red",
+ linestyle = :dash,
+ linewidth = 2,
+ label = "Stimulus",
+ )
+ vlines!(
+ ax,
+ evts_c.latency,
+ color = "green",
+ linestyle = :dash,
+ linewidth = 2,
+ label = "C",
+ )
+ vlines!(
+ ax,
+ evts_r.latency,
+ color = "blue",
+ linestyle = :dash,
+ linewidth = 2,
+ label = "R",
+ )
+ axislegend("Event onset"; unique = true)
+ display(f)
+end
+```
+
+
+
+
+
+Finally, to run the RIDE algorithm, we need to remove the C events from the evts dataframe. They will be estimated during the algorithm and can then be compared to the actual latencies.
+
+```julia
+#only keep the S and R events, the C events will be calculated by the RIDE algorithm
+evts_without_c = @subset(evts, :event .== 'S' .|| :event .== 'R')
+```
\ No newline at end of file
diff --git a/docs/src/11-running_ride.md b/docs/src/11-running_ride.md
new file mode 100644
index 0000000..030b4f2
--- /dev/null
+++ b/docs/src/11-running_ride.md
@@ -0,0 +1,80 @@
+# Running Classic and Unfold RIDE
+
+You can find the code for this tutorial [here](../code/running_ride.jl), using this [Project.toml](../code/Project.toml).
+
+For this tutorial, we use the data from the [data simulation example](./10-data_simulation.md).
+First we include the data simulation and add some additional noise to the data
+
+```julia
+include("simulate_variable_latency_sequence.jl")
+
+#add some noise to the simulated data
+data_noisy = copy(data)
+UnfoldSim.add_noise!(MersenneTwister(1234), PinkNoise(; noiselevel = 0.9), data_noisy)
+```
+
+Now that we have our ```data_noisy``` and ```evts_without_c``` we can define a suitable configuration for RIDE and run the algorithm. The ranges for the individual components have to be determined through manual observation of the data.
+
+```julia
+#run the ride algorithm on the simulated data
+begin
+ #config for ride algorithm
+ cfg = RideConfig(
+ #sfreq is the sampling frequency of the data
+ sfreq = 100,
+ #ranges for the individual components are determined by manually inspecting the data
+ s_range = [-0.1, 0.3],
+ r_range = [0, 0.4],
+ c_range = [-0.4, 0.4],
+ #the range in which the initial peak estimation for the C component is performed
+ c_estimation_range = [0, 0.9],
+ #the range for one epoch
+ epoch_range = [-0.1, 1]
+ )
+ #run the ride algorithm
+ #We only have one channel, so we only need the first entry from the results vector.
+ resultsClassic = ride_algorithm(ClassicMode, data_noisy, evts_without_c, cfg)[1]
+ resultsUnfold = ride_algorithm(UnfoldMode, data_noisy, evts_without_c, cfg)[1]
+end
+```
+
+Finally we can plot the results of both algorithm modes.
+
+Code used for Graph Creation
+
+```julia
+#plot the results
+begin
+ f = Figure(size = (1000, 400))
+
+ #plot classic results
+ ax = Axis(f[1, 1], yticks = -100:100, title="Classic RIDE")
+ raw = lines!(resultsClassic.raw_erp; color = "black", linewidth = 3, label="ERP")
+ s = lines!(resultsClassic.s_erp; color = "blue", label="S")
+ c = lines!(resultsClassic.c_erp; color = "red", label="C")
+ r = lines!(resultsClassic.r_erp; color = "green", label="R")
+
+ #plot unfold results
+ ax = Axis(f[1, 2], yticks = -100:100, title="Unfold RIDE")
+ raw = lines!(resultsUnfold.raw_erp; color = "black", linewidth = 3, label="ERP")
+ s = lines!(resultsUnfold.s_erp; color = "blue", label="S")
+ c = lines!(resultsUnfold.c_erp; color = "red", label="C")
+ r = lines!(resultsUnfold.r_erp; color = "green", label="R")
+ axislegend(ax)
+
+ display(f)
+end
+```
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/src/index.md b/docs/src/index.md
index 88d609a..7ddd2e8 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -2,19 +2,62 @@
CurrentModule = UnfoldRIDE
```
-# UnfoldRIDE
+# UnfoldRIDE.jl Documentation
-Documentation for [UnfoldRIDE](https://github.com/unfoldtoolbox/UnfoldRIDE.jl).
-
-## Contributors
+Welcome to the documentation for [UnfoldRIDE](https://github.com/unfoldtoolbox/UnfoldRIDE.jl), a re-implementation of the [RIDE](https://cns.hkbu.edu.hk/RIDE.htm) algorithm in Julia with an extension to replace RIDE's iterative decomposition with an [Unfold](https://github.com/unfoldtoolbox/Unfold.jl) deconvolution.
```@raw html
-
-
-
+
+
+```
+
+## Key features
+- Component Decomposition using the RIDE method
+- C-Latency Estimation
+- RIDE with an Unfold deconvolution
+
+## Installation
+```julia-repl
+julia> using Pkg; Pkg.add("UnfoldRIDE")
+```
+For more detailed instructions please refer to [Installing Julia & Unfold Packages](https://unfoldtoolbox.github.io/UnfoldDocs/main/installation/).
+
-
-
+## Usage example
-
+```Julia
+#config for ride algorithm
+cfg = RideConfig(
+ #sfreq is the sampling frequency of the data
+ sfreq = 100,
+ #ranges for the individual components have to be determined through manual inspection of the data
+ s_range = [-0.1, 0.3],
+ r_range = [0, 0.4],
+ c_range = [-0.4, 0.4],
+ #the range in which the initial peak estimation for the C component is performed
+ c_estimation_range = [0, 0.9],
+ #the range for one epoch
+ epoch_range = [-0.1, 1]
+)
+#run the ride algorithm
+resultsClassic = ride_algorithm(ClassicMode, data_noisy, evts_without_c, cfg)
+resultsUnfold = ride_algorithm(UnfoldMode, data_noisy, evts_without_c, cfg)
```
+
+## Where to start: Learning roadmap
+### 1. First steps
+🔗 [Data Simulation](10-data_simulation.md)
+
+🔗 [Running UnfoldRIDE](11-running_ride.md)
+
+### 2. Intermediate topics
+📌 Goal:
+🔗
+
+### 3. Advanced topics
+📌 Goal:
+🔗
+
+
+## Statement of need
+TBD
\ No newline at end of file
diff --git a/src/UnfoldRIDE.jl b/src/UnfoldRIDE.jl
index 2694c1e..86aed6a 100644
--- a/src/UnfoldRIDE.jl
+++ b/src/UnfoldRIDE.jl
@@ -1,11 +1,28 @@
module UnfoldRIDE
-"""
- hi = hello_world()
-A simple function to return "Hello, World!"
-"""
-function hello_world()
- return "Hello, World!"
-end
-
-end
+using Random
+using Unfold
+using Parameters
+using DataFrames
+using DataFramesMeta
+using Statistics
+using DSP
+using Peaks
+using Distributions
+using FFTW
+
+
+include("./ride/ride_structs.jl")
+include("./ride/ride_classic_algorithm.jl")
+include("./ride/ride_classic_methods.jl")
+include("./ride/ride_unfold_algorithm.jl")
+include("./ride/ride_unfold_methods.jl")
+include("./ride/ride_shared_methods.jl")
+
+# Export functions
+export ride_algorithm, ride_algorithm_unfold
+
+# Export types
+export RideConfig, ClassicMode, UnfoldMode
+
+end
\ No newline at end of file
diff --git a/src/ride/ride_classic_algorithm.jl b/src/ride/ride_classic_algorithm.jl
new file mode 100644
index 0000000..34e6cc4
--- /dev/null
+++ b/src/ride/ride_classic_algorithm.jl
@@ -0,0 +1,253 @@
+ride_algorithm(Modus::Type{ClassicMode}, data::Vector{Float64}, evts; kwargs...) =
+ ride_algorithm(Modus, data, evts, RideConfig(; kwargs...))
+
+function ride_algorithm(
+ Modus::Type{ClassicMode},
+ data::Array{Float64,2},
+ evts,
+ cfg::RideConfig,
+)
+ r = Vector()
+ for i in range(1, size(data, 1))
+ push!(r, ride_algorithm(Modus, data[i, :], evts, cfg)[1])
+ end
+ return r
+end
+
+function ride_algorithm(
+ Modus::Type{ClassicMode},
+ data::Vector{Float64},
+ evts,
+ cfg::RideConfig,
+)
+ @debug "Running RIDE algorithm with cfg: $cfg"
+
+ @assert cfg.s_range[1] >= cfg.epoch_range[1] && cfg.s_range[2] <= cfg.epoch_range[2] "S range must be within the epoch range"
+ @assert cfg.c_estimation_range[1] >= cfg.epoch_range[1] &&
+ cfg.c_estimation_range[2] <= cfg.epoch_range[2] "C estimation range must be within the epoch range"
+
+ ## data_preparation
+ data_reshaped = reshape(data, (1, :))
+ evts_s = @subset(evts, :event .== 'S')
+ evts_r = @subset(evts, :event .== 'R')
+ interim_results = Vector{RideResults}()
+
+ #epoch data with the cfg.epoch_range to see how many epochs we have
+ #cut evts to match the determined number of epochs
+ #the resulting data_epoched is also used for the c latency estimation
+ data_epoched, data_epoched_times = Unfold.epoch(
+ data = data_reshaped,
+ tbl = evts_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )
+ n, data_epoched = Unfold.drop_missing_epochs(evts_s, data_epoched)
+ number_epochs = size(data_epoched, 3)
+ raw_erp = mean(data_epoched, dims = 3)[1, :, 1]
+ #@assert size(evts) == (number_epochs * 2) "Size of evts is $(size(evts)) but should be $(number_epochs * 2)"
+ evts_s = evts_s[1:number_epochs, :]
+ evts_r = evts_r[1:number_epochs, :]
+
+ #reduce evts to the number of epochs
+ while size(evts, 1) > number_epochs * 2
+ deleteat!(evts, size(evts, 1))
+ end
+ @assert size(evts, 1) == number_epochs * 2 "Size of evts is $(size(evts,1)) but should be $(number_epochs*2)"
+ ##
+
+ ## initial C latency estimation
+ c_latencies_df = initial_peak_estimation(data, evts_s, cfg)
+ evts_c = build_c_evts_table(c_latencies_df, evts_s, cfg)
+ c_range_adj = c_range_adjusted(cfg.c_range)
+ ##
+
+ ## initial erp calculation
+ #calculate initial erp of S
+ data_epoched_s, data_epoched_s_times =
+ Unfold.epoch(data = data_reshaped, tbl = evts_s, τ = cfg.s_range, sfreq = cfg.sfreq)
+ n, data_epoched_s = Unfold.drop_missing_epochs(evts_s, data_epoched_s)
+ s_erp = median(data_epoched_s, dims = 3)
+
+ #calculate initial erp of R
+ data_epoched_r, data_epoched_r_times =
+ Unfold.epoch(data = data_reshaped, tbl = evts_r, τ = cfg.r_range, sfreq = cfg.sfreq)
+ n, data_epoched_r = Unfold.drop_missing_epochs(evts_r, data_epoched_r)
+ r_erp = median(data_epoched_r, dims = 3)
+
+ ## save interim results
+ if cfg.save_interim_results
+ push!(
+ interim_results,
+ create_results(
+ evts,
+ raw_erp,
+ s_erp[1, :, 1],
+ r_erp[1, :, 1],
+ zeros(1),
+ c_latencies_df,
+ cfg,
+ ),
+ )
+ end
+
+ c_latencies_df_prev_prev = nothing
+ c_latencies_df_prev = nothing
+
+ ## iteration start
+ for i in range(1, cfg.iteration_limit)
+ ## decompose data into S, R and C components using the current C latencies
+ for j = 1:25
+ #calculate erp of C by subtracting S and R from the data
+ data_subtracted_s_and_r = subtract_to_data_epoched(
+ data_reshaped,
+ evts_c,
+ c_range_adj,
+ [(evts_s, s_erp, cfg.s_range), (evts_r, r_erp, cfg.r_range)],
+ cfg.sfreq,
+ )
+ c_erp = median(data_subtracted_s_and_r, dims = 3)
+ #calculate erp of S
+ data_subtracted_c_and_r = subtract_to_data_epoched(
+ data_reshaped,
+ evts_s,
+ cfg.s_range,
+ [(evts_c, c_erp, c_range_adj), (evts_r, r_erp, cfg.r_range)],
+ cfg.sfreq,
+ )
+ s_erp = median(data_subtracted_c_and_r, dims = 3)
+ #calculate erp of R
+ data_subtracted_s_and_c = subtract_to_data_epoched(
+ data_reshaped,
+ evts_r,
+ cfg.r_range,
+ [(evts_s, s_erp, cfg.s_range), (evts_c, c_erp, c_range_adj)],
+ cfg.sfreq,
+ )
+ r_erp = median(data_subtracted_s_and_c, dims = 3)
+ end
+ ##
+
+ ## update C latencies via pattern matching
+ #perform the pattern matching on the data with the S and R components subtracted
+ data_subtracted_s_and_r = subtract_to_data(
+ data_reshaped,
+ [(evts_s, s_erp, cfg.s_range), (evts_r, r_erp, cfg.r_range)],
+ cfg.sfreq,
+ )
+ if cfg.filtering
+ data_subtracted_s_and_r = dspfilter(data_subtracted_s_and_r[1, :], 5, 20)
+ end
+ data_epoched_subtracted_s_and_r, n = Unfold.epoch(
+ data = data_subtracted_s_and_r,
+ tbl = evts_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )
+ n, data_epoched_subtracted_s_and_r =
+ Unfold.drop_missing_epochs(evts_s, data_epoched_subtracted_s_and_r)
+ xcorr, m, onset =
+ findxcorrpeak(data_epoched_subtracted_s_and_r[1, :, :], c_erp[1, :, 1])
+ c_latencies = reshape(m, (1, :))
+ for (i, row) in enumerate(eachrow(c_latencies_df))
+ if (row.fixed)
+ continue
+ end
+ row.latency = c_latencies[i]
+ end
+ ##
+
+ ## heuristics
+ if cfg.heuristic1 &&
+ !isnothing(c_latencies_df_prev) &&
+ !isnothing(c_latencies_df_prev_prev)
+ heuristic1_monoton_latency_changes!(
+ c_latencies_df,
+ c_latencies_df_prev,
+ c_latencies_df_prev_prev,
+ )
+ end
+
+ if cfg.heuristic2 && !isnothing(c_latencies_df_prev)
+ heuristic2_randomize_latency_on_convex_xcorr!(
+ c_latencies_df,
+ c_latencies_df_prev,
+ xcorr,
+ cfg.heuristic2_rng,
+ )
+ end
+
+ if cfg.heuristic3 && !isnothing(c_latencies_df_prev)
+ heuristic3_pick_closest_xcorr_peak!(
+ c_latencies_df,
+ c_latencies_df_prev,
+ xcorr;
+ equality_threshold = cfg.heuristic3_threshhold,
+ onset = onset,
+ )
+ end
+
+ c_latencies_df_prev_prev = deepcopy(c_latencies_df_prev)
+ c_latencies_df_prev = deepcopy(c_latencies_df)
+ evts_c = build_c_evts_table(c_latencies_df, evts_s, cfg)
+ ##
+
+ ## save interim results
+ if cfg.save_interim_results
+ push!(
+ interim_results,
+ create_results(
+ evts,
+ raw_erp,
+ s_erp[1, :, 1],
+ r_erp[1, :, 1],
+ c_erp[1, :, 1],
+ c_latencies_df,
+ cfg,
+ ),
+ )
+ end
+ end
+
+ ##last iteration using the mean instead of median
+ #calculate erp of C by subtracting S and R from the data
+ data_subtracted_s_and_r = subtract_to_data_epoched(
+ data_reshaped,
+ evts_c,
+ c_range_adj,
+ [(evts_s, s_erp, cfg.s_range), (evts_r, r_erp, cfg.r_range)],
+ cfg.sfreq,
+ )
+ c_erp = mean(data_subtracted_s_and_r, dims = 3)
+ #calculate erp of S
+ data_subtracted_c_and_r = subtract_to_data_epoched(
+ data_reshaped,
+ evts_s,
+ cfg.s_range,
+ [(evts_c, c_erp, c_range_adj), (evts_r, r_erp, cfg.r_range)],
+ cfg.sfreq,
+ )
+ s_erp = mean(data_subtracted_c_and_r, dims = 3)
+ #calculate erp of R
+ data_subtracted_s_and_c = subtract_to_data_epoched(
+ data_reshaped,
+ evts_r,
+ cfg.r_range,
+ [(evts_s, s_erp, cfg.s_range), (evts_c, c_erp, c_range_adj)],
+ cfg.sfreq,
+ )
+ r_erp = mean(data_subtracted_s_and_c, dims = 3)
+ ##
+
+ results = create_results(
+ evts,
+ raw_erp,
+ s_erp[1, :, 1],
+ r_erp[1, :, 1],
+ c_erp[1, :, 1],
+ c_latencies_df,
+ cfg,
+ )
+ results.interim_results = interim_results
+
+ return [results]
+end
\ No newline at end of file
diff --git a/src/ride/ride_classic_methods.jl b/src/ride/ride_classic_methods.jl
new file mode 100644
index 0000000..addc8ec
--- /dev/null
+++ b/src/ride/ride_classic_methods.jl
@@ -0,0 +1,36 @@
+function subtract_to_data(data, others_evts_erp_tuples, sfreq)
+ data_subtracted = copy(data)
+ for (evts, erp, range) in others_evts_erp_tuples
+ for i in evts.latency
+ sub_range =
+ i+round(Int, range[1] * sfreq):i+round(Int, (range[1] * sfreq))+size(
+ erp[1, :, 1],
+ )[1]-1
+ if sub_range[end] > length(data_subtracted)
+ continue
+ end
+ data_subtracted[1, sub_range] -= erp[1, :, 1]
+ end
+ end
+ return data_subtracted
+end
+
+function subtract_to_data_epoched(
+ data,
+ target_evts,
+ target_range,
+ others_evts_erp_tuples,
+ sfreq,
+)
+ data_subtracted = subtract_to_data(data, others_evts_erp_tuples, sfreq)
+ data_epoched_subtracted, n = Unfold.epoch(
+ data = data_subtracted,
+ tbl = target_evts,
+ τ = target_range,
+ sfreq = sfreq,
+ )
+ n, data_epoched_subtracted =
+ Unfold.drop_missing_epochs(target_evts, data_epoched_subtracted)
+ #new_erp = median(data_epoched_subtracted, dims = 3)
+ return data_epoched_subtracted
+end
\ No newline at end of file
diff --git a/src/ride/ride_shared_methods.jl b/src/ride/ride_shared_methods.jl
new file mode 100644
index 0000000..fba4a14
--- /dev/null
+++ b/src/ride/ride_shared_methods.jl
@@ -0,0 +1,424 @@
+"""
+ findxcorrpeak(d::Matrix, kernel::Vector; window = false)
+
+Calculate the cross correlation between the data and the kernel for each epoch and find the peak.
+
+# Arguments
+- `data::Matrix{Float64}`: Input data in shape (samples, epochs)
+- `kernel::Vector{Float64}`: Kernel to cross correlate with the data.
+
+# Keyword arguments
+- `window::Bool = false`: Apply a Hanning window to the kernel before cross-correlation,
+ favoring central values of the kernel.
+
+# Returns
+- `xc::Vector{Vector{Float64}}` : Cross correlation result per epoch.
+- `maxima::Vector{Int}` : Maxima of the cross correlation per epoch.
+- `onset::Int` : Onset of the kernel.
+"""
+function findxcorrpeak(
+ data::Union{Matrix{Float64},Vector{Float64}},
+ kernel::Vector{Float64};
+ window::Bool = false,
+)
+ #the purpose of this method is to find the peak of the cross correlation between the kernel and the data
+ #kernel = C component erp. Hanning is applied to factor the center of the C erp more than the edges.
+ weightedkernel = window ? kernel .* hanning(length(kernel)) : kernel
+ xc::Vector{Vector{Float64}} =
+ xcorr.(eachcol(data), Ref(weightedkernel); padmode = :none)
+ onset::Int = length(kernel)
+ maxima::Vector{Int} = [findmax(x)[2] for x in xc] .- onset
+ return xc, maxima, onset
+end
+
+"""
+ initial_peak_estimation(data_continous::Matrix{Float64}, evts::DataFrame, cfg::RideConfig)
+
+Multi-channel version of initial_peak_estimation.
+
+# Arguments
+- `data_continous::Matrix{Float64}`: Input data in shape (channels, samples).
+- `evts::DataFrame`: Event table. S events are extracted from this table.
+- `cfg::RideConfig`: Configuration for the RIDE algorithm.
+
+# Returns
+- `latencies_df_vector::Vector{DataFrame}` : Vector of DataFrames containing the estimated latencies for every channel. Latencies are from the start of the epoch to the beginning of the c_range.
+"""
+function initial_peak_estimation(
+ data_continous::Matrix{Float64},
+ evts::DataFrame,
+ cfg::RideConfig,
+)
+ latencies_df_vector = Vector()
+ for i = 1:size(data_continous, 1)
+ push!(latencies_df_vector, initial_peak_estimation(data_continous[i, :], evts, cfg))
+ end
+ return latencies_df_vector
+end
+
+"""
+ initial_peak_estimation(data_continous::Array{Float64}, evts::DataFrame, cfg::RideConfig)
+
+Estimates the C latencies of every epoch using peak picking in the given cfg.c_estimation_range.
+The resulting latencies are from the start of the epoch to the beginning of the c_range.
+
+# Arguments
+- `data_continous::Array{Float64}`: Continuous data on which the peak picking is performed.
+- `evts::DataFrame`: Event table. S events are extracted from this table.
+- `cfg::RideConfig`: Configuration for the RIDE algorithm.
+
+# Returns
+- `latencies_df::DataFrame` : DataFrame containing the estimated latencies for every epoch and a fixed=false column. Latencies are from the start of the epoch to the beginning of the c_range.
+"""
+function initial_peak_estimation(
+ data_continous::Array{Float64},
+ evts::DataFrame,
+ cfg::RideConfig,
+)
+ evts_s = @subset(evts, :event .== 'S')
+ data_residuals_epoched, times = Unfold.epoch(
+ data = data_continous,
+ tbl = evts_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )
+ n, data_residuals_epoched = Unfold.drop_missing_epochs(evts_s, data_residuals_epoched)
+ c_latencies = Vector{Float64}(undef, size(data_residuals_epoched, 3))
+ #Peak estimation for initial c latencies for every epoch
+ for a in (1:size(data_residuals_epoched, 3))
+ range_start =
+ round(Int, (cfg.c_estimation_range[1] - cfg.epoch_range[1]) * cfg.sfreq)
+ range_end = round(Int, (cfg.c_estimation_range[2] - cfg.epoch_range[1]) * cfg.sfreq)
+ range = range_start:range_end
+ #find maximum in the given c_estimation range
+ maximum = findmax(abs.(data_residuals_epoched[1, range, a]))[2]
+ #format the latency to be from epoch start to the start of the c_range window
+ c_latencies[a] = maximum + range[1] - 1 + round(Int, cfg.c_range[1] * cfg.sfreq)
+ end
+ latencies_df = DataFrame(latency = c_latencies, fixed = false)
+ return latencies_df
+end
+
+"""
+ c_range_adjusted(c_range::Vector{Float64})
+
+Helper Function to adjust the c_range to be from 0 to the difference of the original c_range.
+This simplifies several of the algorithm's functions without the need to create a new variable.
+"""
+function c_range_adjusted(c_range::Vector{Float64})
+ return [0, c_range[2] - c_range[1]]
+end
+
+"""
+ build_c_evts_table(latencies_df_vector::Vector, evts::DataFrame, cfg::RideConfig)
+
+Create C event table by copying S and adding the estimated latency.
+
+This version is used for multi-channel data. It uses the mean across all channels as the estimated latencies.
+
+# Arguments
+- `latencies_df_vector::Vector`: Vector of DataFrames containing the Latency field. Latencies from epoch start are expected.
+- `evts::DataFrame`: Event table. S events are extracted from this table.
+
+# Returns
+- `evts_c::DataFrame` : New event table containing only the C events.
+"""
+function build_c_evts_table(latencies_df_vector::Vector, evts::DataFrame, cfg::RideConfig)
+ #calculate the mean of the latencies over all channels
+ latencies_mean = deepcopy(latencies_df_vector[1])
+ for i in range(2, size(latencies_df_vector, 1))
+ latencies_mean.latency .+= latencies_df_vector[i].latency
+ end
+ latencies_mean.latency ./= size(latencies_df_vector, 1)
+ #use the usual build_c_evts_table function with the mean latencies
+ return build_c_evts_table(latencies_mean, evts, cfg)
+end
+
+"""
+ build_c_evts_table(latencies_df::DataFrame, evts, cfg::RideConfig)
+
+Create C event table by copying S and adding the estimated latency.
+
+# Arguments
+- `latencies_df::DataFrame`: DataFrame containing the Latency field. Latencies from epoch start are expected.
+- `evts::DataFrame`: Event table. S events are extracted from this table.
+
+# Returns
+- `evts_c::DataFrame` : New event table containing only the C events.
+"""
+function build_c_evts_table(latencies_df::DataFrame, evts::DataFrame, cfg::RideConfig)
+ evts_s = @subset(evts, :event .== 'S')
+ @assert size(latencies_df, 1) == size(evts_s, 1) "latencies_df and evts_s must have the same size"
+ evts_c = copy(evts_s)
+ evts_c[!, :latency] .=
+ round.(
+ Int,
+ evts_s[!, :latency] + latencies_df[!, :latency] .+
+ (cfg.epoch_range[1] * cfg.sfreq),
+ )
+ evts_c[!, :event] .= 'C'
+ return evts_c
+end
+
+"""
+ heuristic1_monoton_latency_changes!(latencies_df::DataFrame, latencies_df_old::DataFrame, latencies_df_old_old::DataFrame)
+
+Ensure that the changes in the latencies are monotonic, i.e. they always change in one direction.
+If a non-monotonic change is detected, revert the change and mark the latency as fixed in the DataFrame.
+
+# Arguments
+- `latencies_df::DataFrame`: The current latencies DataFrame.
+- `latencies_df_old::DataFrame`: The previous latencies DataFrame, used to calculate the new change in latency.
+- `latencies_df_old_old::DataFrame`: The latencies DataFrame before the previous one, used to calculate the previous change in latency.
+"""
+function heuristic1_monoton_latency_changes!(
+ latencies_df::DataFrame,
+ latencies_df_old::DataFrame,
+ latencies_df_old_old::DataFrame,
+)
+ for (i, row) in enumerate(eachrow(latencies_df))
+ if row.fixed
+ continue
+ end
+ prev_change = latencies_df_old.latency[i] - latencies_df_old_old.latency[i]
+ new_change = row.latency - latencies_df_old.latency[i]
+ if prev_change > 0 && new_change < 0 || prev_change < 0 && new_change > 0
+ row.latency = latencies_df_old.latency[i]
+ row.fixed = true
+ @debug "heuristic1 reverted non-monoton latency change for epoch $i"
+ end
+ end
+end
+
+"""
+ heuristic2_randomize_latency_on_convex_xcorr!(latencies_df, latencies_df_old, xcorr, rng = MersenneTwister(1234),)
+
+Ensure that the cross correlation is not convex; randomize the latency if it is.
+
+Check if the cross correlation is convex by searching for peaks in the cross correlation.
+When no peak is found, the xcorrelation is considered convex and
+the latency is randomized with a gaussian distribution over the previous latencies.
+
+# Arguments
+- `latencies_df::DataFrame`: The current latencies DataFrame.
+- `latencies_df_old::DataFrame`: The previous latencies DataFrame.
+ Used to calculate the gaussian distribution for the randomization.
+- `xcorr::Vector{Vector{Float64}}`: Cross correlation results for every epoch.
+
+# Keyword arguments
+- `rng::AbstractRNG = MersenneTwister(1234)`: RNG used for random latency generation.
+"""
+function heuristic2_randomize_latency_on_convex_xcorr!(
+ latencies_df::DataFrame,
+ latencies_df_old::DataFrame,
+ xcorr::Vector{Vector{Float64}},
+ rng::AbstractRNG = MersenneTwister(1234),
+)
+ @assert size(latencies_df, 1) == size(xcorr, 1) "latencies_df and xcorr must have the same size"
+ ##you cannot calculate a standard deviation with less than 2 values
+ standard_deviation = std(latencies_df_old.latency)
+ if (isnan(standard_deviation))
+ standard_deviation = 1
+ end
+ normal_distribution = Normal(mean(latencies_df_old.latency), standard_deviation)
+ for (i, row) in enumerate(eachrow(latencies_df))
+ if row.fixed
+ continue
+ end
+ maxima = findmaxima(xcorr[i])
+ maxima_length = length(maxima.indices)
+ if maxima_length == 0
+ row.latency = round(Int, rand(rng, normal_distribution))
+ row.fixed = true
+ @debug "heuristic2 randomized latency for epoch $i"
+ end
+ end
+end
+
+"""
+ heuristic3_pick_closest_xcorr_peak!(latencies_df::DataFrame, latencies_df_old::DataFrame, xcorr::Vector{Vector{Float64}}; equality_threshold::Float64 = 0.9, onset::Int64 = 0)
+
+Check for multiple "competing" peaks in the cross correlation and pick the closest one to the previous latency.
+
+Any peak with a value > maximum * equality_threshold is considered a competing peak.
+Latencies marked as fixed are skipped.
+
+# Arguments
+- `latencies_df::DataFrame`: The current latencies DataFrame.
+- `latencies_df_old::DataFrame`: The previous latencies DataFrame.
+ Necessary to calculate the closest peak to the previous latency.
+- `xcorr::Vector{Vector{Float64}}`: Cross correlation results for every epoch.
+
+# Keyword arguments (if needed)
+- `equality_threshold::Float64 = 0.9`: Threshold to determine which peaks are considered as competing.
+ Any peak with a value > maximum * equality_threshold is considered a competing peak, with maximum being the highest peak.
+ The threshold must be between 0 and 1.
+- `onset::Int64 = 0`: Onset of the kernel used in the cross correlation. Used to bring the latencies into the proper format.
+"""
+function heuristic3_pick_closest_xcorr_peak!(
+ latencies_df::DataFrame,
+ latencies_df_old::DataFrame,
+ xcorr::Vector{Vector{Float64}};
+ equality_threshold::Float64 = 0.9,
+ onset::Int64 = 0,
+)
+ @assert size(latencies_df, 1) == size(xcorr, 1) "latencies_df and xcorr must have the same size"
+ @assert size(latencies_df_old, 1) == size(latencies_df, 1) "latencies_df and latencies_df_old must have the same size"
+ @assert equality_threshold > 0 && equality_threshold <= 1 "equality_threshold must be between 0 and 1"
+ for (i, row) in enumerate(eachrow(latencies_df_old))
+ # skip fixed entries
+ if row.fixed
+ continue
+ end
+ maxima = findmaxima(xcorr[i])
+ if isempty(maxima)
+ continue
+ end
+ maximum_value = findmax(maxima.heights)[1]
+ competing_peaks = []
+ for (j, peak) in enumerate(maxima.indices)
+ if xcorr[i][peak] > maximum_value * equality_threshold
+ push!(competing_peaks, peak - onset)
+ end
+ end
+ if isempty(competing_peaks)
+ continue
+ end
+ closest_peak = argmin(abs.(competing_peaks .- row.latency))
+ latencies_df.latency[i] = competing_peaks[closest_peak]
+ end
+end
+
+
+"""
+ dspfilter(signal_to_filter::Vector{Float64}, filter_at::Int64, sampling_rate::Int64)
+
+Lowpass-filter the given signal at the cutoff frequency `filter_at` (in Hz), given the sampling rate.
+"""
+function dspfilter(
+ signal_to_filter::Vector{Float64},
+ filter_at::Int64,
+ sampling_rate::Int64,
+)
+ @assert filter_at * 2 < sampling_rate "Filter frequency must be less than half the sampling rate"
+ a = signal_to_filter
+ p = round(
+ 3.3 / (
+ min(max(filter_at * 0.25, 2.0), sampling_rate / 2 - filter_at) / sampling_rate
+ ),
+ )
+ order = Int(p)
+ order = Int(order ÷ 2 * 2) # we need even filter order
+
+ f = DSP.Filters.digitalfilter(
+ Lowpass(filter_at / (sampling_rate / 2)),
+ FIRWindow(DSP.hanning(order)),
+ )
+ b::Vector{Float64} = filtfilt(f, a)
+
+ return b
+end
+
+"""
+ save_interim_results!(results_vector::Vector{Vector}, evts, raw_erp::Matrix, s_erp::Matrix, r_erp::Matrix, c_erp::Matrix, c_latencies_df::Vector, cfg::RideConfig)
+
+Helper function to create a RideResults object for every channel and add it to the given results_vector.
+"""
+function save_interim_results!(
+ results_vector::Vector{Vector},
+ evts,
+ raw_erp::Matrix,
+ s_erp::Matrix,
+ r_erp::Matrix,
+ c_erp::Matrix,
+ c_latencies_df::Vector,
+ cfg::RideConfig,
+)
+ for i in axes(results_vector, 1)
+ r = create_results(
+ evts,
+ raw_erp[i, :],
+ s_erp[i, :],
+ r_erp[i, :],
+ c_erp[i, :],
+ c_latencies_df[i],
+ cfg,
+ )
+ push!(results_vector[i], r)
+ end
+end
+
+"""
+ create_results(evts, raw_erp::Vector, s_erp::Vector, r_erp::Vector, c_erp::Vector, c_latencies_df, cfg::RideConfig)
+
+Create a RideResults object from the given parameters.
+Pads component erps to be the same length as one epoch. Result also includes unpadded versions.
+Changes the c_latencies to be from stimulus onset instead of epoch start.
+
+# Returns
+- `result::RideResults` : The created RideResults object, the interim_results field is left empty.
+"""
+function create_results(
+ evts,
+ raw_erp::Vector,
+ s_erp::Vector,
+ r_erp::Vector,
+ c_erp::Vector,
+ c_latencies_df,
+ cfg::RideConfig,
+)
+ evts_s = @subset(evts, :event .== 'S')
+ evts_r = @subset(evts, :event .== 'R')
+
+ # pad erps to have the same size as one epoch
+ mean_s_latency = round(Int, ((-cfg.epoch_range[1] + cfg.s_range[1]) * cfg.sfreq))
+ evts_r_latencies_from_s = (evts_r.latency - evts_s.latency)
+ median_r_latency = round(
+ Int,
+ median(evts_r_latencies_from_s) + (cfg.r_range[1] * cfg.sfreq) -
+ (cfg.epoch_range[1] * cfg.sfreq),
+ )
+ median_c_latency = round(Int, median(c_latencies_df.latency))
+ s_erp_padded = pad_erp_to_epoch_size(s_erp, mean_s_latency, cfg)
+ r_erp_padded = pad_erp_to_epoch_size(r_erp, median_r_latency, cfg)
+ c_erp_padded = pad_erp_to_epoch_size(c_erp, median_c_latency, cfg)
+
+ #add the epoch range to the c_latencies as the output should be latency
+ #from stimulus onset, not from epoch onset
+ c_latencies_from_stimulus_onset =
+ c_latencies_df.latency .+ (cfg.epoch_range[1] * cfg.sfreq)
+
+ result = RideResults(
+ raw_erp = raw_erp,
+ s_erp = s_erp_padded,
+ r_erp = r_erp_padded,
+ c_erp = c_erp_padded,
+ s_erp_unpadded = s_erp,
+ r_erp_unpadded = r_erp,
+ c_erp_unpadded = c_erp,
+ c_latencies = c_latencies_from_stimulus_onset,
+ )
+ return result
+end
+
+"""
+ pad_erp_to_epoch_size(erp::Vector{Float64}, latency_from_epoch_start::Int64, cfg::RideConfig)
+
+Pad the given erp to the epoch size using the given latency.
+
+# Returns
+- `padded_erp::Vector{Float64}` : A new padded erp.
+"""
+function pad_erp_to_epoch_size(
+ erp::Vector{Float64},
+ latency_from_epoch_start::Int64,
+ cfg::RideConfig,
+)
+ epoch_length = round(Int, (cfg.epoch_range[2] - cfg.epoch_range[1]) * cfg.sfreq)
+ padding_front_length = round(Int, latency_from_epoch_start)
+ padding_front = zeros(Float64, max(padding_front_length, 0))
+ padding_back_length = epoch_length - size(padding_front, 1) - size(erp, 1)
+ padding_back = zeros(Float64, max(padding_back_length, 0))
+ padded_erp::Vector{Float64} = vcat(padding_front, erp, padding_back)
+ return padded_erp
+end
\ No newline at end of file
diff --git a/src/ride/ride_structs.jl b/src/ride/ride_structs.jl
new file mode 100644
index 0000000..fd63035
--- /dev/null
+++ b/src/ride/ride_structs.jl
@@ -0,0 +1,120 @@
+"""
+ @with_kw struct RideConfig
+
+A struct holding the configuration values of the RIDE algorithm.
+
+# Fields
+- `sfreq::Int`: The sampling frequency of the data.
+- `s_range::Vector{Float64}`: The range of the S component. Usually determined
+through manual inspection of the data.
+- `r_range::Vector{Float64}`: The range of the R component. Usually determined
+through manual inspection of the data.
+- `c_range::Vector{Float64}`: The range of the C component. Usually determined
+through manual inspection of the data.
+- `c_estimation_range::Vector{Float64}`: The range used for the initial C
+component latency estimation through peak picking.
+- `epoch_range::Vector{Float64}`: The range of one epoch centered around the stimulus onset.
+- `iteration_limit::Int = 4`: The maximum number of iterations of the RIDE algorithm. This
+is for the outer decomposition-latency estimation loop.
+- `heuristic1::Bool = true`: A flag to enable/disable heuristic 1. This heuristic ensures a
+monotonic latency evolution.
+- `heuristic2::Bool = true`: A flag to enable/disable heuristic 2. This heuristic randomizes
+and fixes a latency on encountering a convex cross-correlation result.
+- `heuristic2_rng = MersenneTwister(1234)`: The random number generator used for heuristic 2.
+- `heuristic3::Bool = true`: A flag to enable/disable heuristic 3. This heuristic searches for
+competing peaks in the xcorrelation results. The peak closest to the previous latency is chosen.
+- `heuristic3_threshhold::Float64 = 0.9`: The threshold used for heuristic 3. If the peak is
+below this threshold * the maximum peak, it is considered a competing peak.
+- `filtering::Bool = true`: A flag to enable/disable filtering of the data before performing the
+cross correlation.
+- `save_interim_results::Bool = false`: A flag to enable/disable saving the interim results of
+each iteration of the RIDE algorithm.
+
+
+# Examples
+```julia-repl
+cfg = RideConfig(
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4],
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 5,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = true,
+)
+```
+"""
+@with_kw struct RideConfig
+ sfreq::Int
+ s_range::Vector{Float64}
+ r_range::Vector{Float64}
+ c_range::Vector{Float64}
+ c_estimation_range::Vector{Float64}
+ epoch_range::Vector{Float64}
+ iteration_limit::Int = 4
+ heuristic1::Bool = true
+ heuristic2::Bool = true
+ heuristic2_rng = MersenneTwister(1234)
+ heuristic3::Bool = true
+ heuristic3_threshhold::Float64 = 0.9
+ filtering::Bool = true
+ save_interim_results::Bool = false
+end
+
+"""
+ abstract type AbstractMode
+
+An abstract type that defines different modes of the RIDE algorithm.
+"""
+abstract type AbstractMode end
+
+"""
+ struct ClassicMode <: AbstractMode
+
+A struct representing the ClassicMode of RIDE. Pass it to the RIDE algorithm to
+ run in Classic mode.
+"""
+struct ClassicMode <: AbstractMode end
+
+"""
+ struct UnfoldMode <: AbstractMode
+
+A struct representing the UnfoldMode of RIDE. Pass it to the RIDE algorithm to
+ run in Unfold mode.
+"""
+struct UnfoldMode <: AbstractMode end
+
+"""
+ @with_kw mutable struct RideResults
+
+A struct holding the results of a single run of the RIDE algorithm.
+
+# Fields
+- `interim_results::Vector{RideResults}`: A vector of `RideResults` structs that
+hold the interim results of each iteration of the RIDE algorithm. This field is
+only filled when the algorithm is run with the `save_interim_results` flag.
+- `raw_erp::Array{Float64}`: The raw ERP.
+- `s_erp::Array{Float64}`: The ERP of the S component.
+- `r_erp::Array{Float64}`: The ERP of the R component.
+- `c_erp::Array{Float64}`: The ERP of the C component.
+- `s_erp_unpadded::Array{Float64}`: The ERP of the S component, unpadded.
+- `r_erp_unpadded::Array{Float64}`: The ERP of the R component, unpadded.
+- `c_erp_unpadded::Array{Float64}`: The ERP of the C component, unpadded.
+- `c_latencies::Array{Int64}`: The latencies of the C component from the stimulus
+onset.
+"""
+@with_kw mutable struct RideResults
+ interim_results::Vector{RideResults} = []
+ raw_erp::Array{Float64} = []
+ s_erp::Array{Float64} = []
+ r_erp::Array{Float64} = []
+ c_erp::Array{Float64} = []
+ s_erp_unpadded::Array{Float64} = []
+ r_erp_unpadded::Array{Float64} = []
+ c_erp_unpadded::Array{Float64} = []
+ c_latencies::Array{Int64} = []
+end
\ No newline at end of file
diff --git a/src/ride/ride_unfold_algorithm.jl b/src/ride/ride_unfold_algorithm.jl
new file mode 100644
index 0000000..bc6d032
--- /dev/null
+++ b/src/ride/ride_unfold_algorithm.jl
@@ -0,0 +1,226 @@
+ride_algorithm(Modus::Type{UnfoldMode}, data::Array{Float64}, evts, cfg::RideConfig) =
+ ride_algorithm(Modus, reshape(data, (1, :)), evts, cfg)
+
+function ride_algorithm(
+ Modus::Type{UnfoldMode},
+ data::Array{Float64,2},
+ evts,
+ cfg::RideConfig,
+)
+ @debug "Running RIDE algorithm with cfg: $cfg"
+ @assert cfg.s_range[1] >= cfg.epoch_range[1] && cfg.s_range[2] <= cfg.epoch_range[2] "S range must be within the epoch range"
+ @assert cfg.c_estimation_range[1] >= cfg.epoch_range[1] &&
+ cfg.c_estimation_range[2] <= cfg.epoch_range[2] "C estimation range must be within the epoch range"
+
+ ## data_preparation
+ evts_s = @subset(evts, :event .== 'S')
+ evts_r = @subset(evts, :event .== 'R')
+ interim_results = Vector{Vector}()
+ for i in axes(data, 1)
+ push!(interim_results, Vector{RideResults}())
+ end
+ ##
+
+ #epoch data with the cfg.epoch_range to see how many epochs we have
+ #cut evts to match the determined number of epochs
+ #the resulting data_epoched is also used for the c latency estimation
+ data_epoched, data_epoched_times =
+ Unfold.epoch(data = data, tbl = evts_s, τ = cfg.epoch_range, sfreq = cfg.sfreq)
+ n, data_epoched = Unfold.drop_missing_epochs(evts_s, data_epoched)
+ number_epochs = size(data_epoched, 3)
+ raw_erp = mean(data_epoched, dims = 3)[:, :, 1]
+ #@assert size(evts) == (number_epochs * 2) "Size of evts is $(size(evts)) but should be $(number_epochs * 2)"
+ evts_s = evts_s[1:number_epochs, :]
+ evts_r = evts_r[1:number_epochs, :]
+
+ #reduce evts to the number of epochs
+ while size(evts, 1) > number_epochs * 2
+ deleteat!(evts, size(evts, 1))
+ end
+ @assert size(evts, 1) == number_epochs * 2 "Size of evts is $(size(evts,1)) but should be $(number_epochs*2)"
+ ##
+
+ ## initial unfold deconvolution
+ m = fit(
+ UnfoldModel,
+ [
+ 'S' => (@formula(0 ~ 1), firbasis(cfg.s_range, cfg.sfreq, "")),
+ 'R' => (@formula(0 ~ 1), firbasis(cfg.r_range, cfg.sfreq, "")),
+ ],
+ evts,
+ data,
+ )
+ c_table = coeftable(m)
+ s_erp = Matrix{Float64}(
+ undef,
+ size(data, 1),
+ size(@subset(c_table, :eventname .== 'S', :channel .== 1), 1),
+ )
+ r_erp = Matrix{Float64}(
+ undef,
+ size(data, 1),
+ size(@subset(c_table, :eventname .== 'R', :channel .== 1), 1),
+ )
+ for i in range(1, size(data, 1))
+ s_erp[i, :] = @subset(c_table, :eventname .== 'S', :channel .== i).estimate
+ r_erp[i, :] = @subset(c_table, :eventname .== 'R', :channel .== i).estimate
+ end
+ ##
+
+ ## initial residue calculation (data minus S and R)
+ yhat = predict(m, overlap = true)
+ y = data
+ residuals_without_SR = Unfold._residuals(UnfoldModel, yhat, y)
+ ##
+
+ ## initial C latency estimation
+ c_latencies_df = initial_peak_estimation(residuals_without_SR, evts_s, cfg)
+ evts_c = build_c_evts_table(c_latencies_df, evts_s, cfg)
+ ##
+
+ ## calculate first c_erp from initial latencies and residue/data
+ data_residuals_c_epoched, times = Unfold.epoch(
+ data = residuals_without_SR,
+ tbl = evts_c,
+ τ = c_range_adjusted(cfg.c_range),
+ sfreq = cfg.sfreq,
+ )
+ n, data_residuals_c_epoched =
+ Unfold.drop_missing_epochs(evts_c, data_residuals_c_epoched)
+ c_erp = median(data_residuals_c_epoched, dims = 3)
+ c_erp = c_erp[:, :, 1]
+ ##
+
+ ## save interim results
+ if cfg.save_interim_results
+ save_interim_results!(
+ interim_results,
+ evts,
+ raw_erp,
+ s_erp,
+ r_erp,
+ c_erp,
+ c_latencies_df,
+ cfg,
+ )
+ end
+
+ ## initial pattern matching with the first calculated c_erp
+ c_latencies_df_prev_prev = nothing
+ c_latencies_df_prev = deepcopy(c_latencies_df)
+ for i in range(1, size(c_latencies_df, 1))
+ c_latencies_df[i], xcorr = unfold_pattern_matching(
+ c_latencies_df[i],
+ residuals_without_SR[i, :],
+ c_erp[i, :],
+ evts_s,
+ cfg,
+ )
+ end
+
+ evts_c = build_c_evts_table(c_latencies_df, evts_s, cfg)
+ ##
+
+ ## save interim results
+ if cfg.save_interim_results
+ save_interim_results!(
+ interim_results,
+ evts,
+ raw_erp,
+ s_erp,
+ r_erp,
+ c_erp,
+ c_latencies_df,
+ cfg,
+ )
+ end
+
+
+ ## iteration start
+ for i in range(1, cfg.iteration_limit)
+ ## decompose data into S, R and C components using the current C latencies
+ evts_with_c = sort(vcat(evts, evts_c), [:latency])
+ s_erp, r_erp, c_erp, residue = unfold_decomposition(data, evts_with_c, cfg)
+ ##
+
+ ## update C latencies and apply heuristics
+ for n in axes(data, 1)
+ ## update C latencies via pattern matching
+ if cfg.filtering
+ residue[n, :] = dspfilter(residue[n, :], 5, 20)
+ end
+ c_latencies_df[n], xcorr, onset = unfold_pattern_matching(
+ c_latencies_df[n],
+ residue[n, :],
+ c_erp[n, :],
+ evts_s,
+ cfg,
+ )
+ ##
+
+ ## heuristics
+ if cfg.heuristic1 && i > 1
+ heuristic1_monoton_latency_changes!(
+ c_latencies_df[n],
+ c_latencies_df_prev[n],
+ c_latencies_df_prev_prev[n],
+ )
+ end
+
+ if cfg.heuristic2
+ heuristic2_randomize_latency_on_convex_xcorr!(
+ c_latencies_df[n],
+ c_latencies_df_prev[n],
+ xcorr,
+ cfg.heuristic2_rng,
+ )
+ end
+
+ if cfg.heuristic3
+ heuristic3_pick_closest_xcorr_peak!(
+ c_latencies_df[n],
+ c_latencies_df_prev[n],
+ xcorr;
+ equality_threshold = cfg.heuristic3_threshhold,
+ onset = onset,
+ )
+ end
+ end
+
+ c_latencies_df_prev_prev = deepcopy(c_latencies_df_prev)
+ c_latencies_df_prev = deepcopy(c_latencies_df)
+ evts_c = build_c_evts_table(c_latencies_df, evts_s, cfg)
+ ##
+
+ ## save interim results
+ if cfg.save_interim_results
+ save_interim_results!(
+ interim_results,
+ evts,
+ raw_erp,
+ s_erp,
+ r_erp,
+ c_erp,
+ c_latencies_df,
+ cfg,
+ )
+ end
+ end
+
+ results = Vector{RideResults}()
+ for i in axes(data, 1)
+ r = create_results(
+ evts,
+ raw_erp[i, :],
+ s_erp[i, :],
+ r_erp[i, :],
+ c_erp[i, :],
+ c_latencies_df[i],
+ cfg,
+ )
+ r.interim_results = interim_results[i]
+ push!(results, r)
+ end
+
+ return results
+end
\ No newline at end of file
diff --git a/src/ride/ride_unfold_methods.jl b/src/ride/ride_unfold_methods.jl
new file mode 100644
index 0000000..981633a
--- /dev/null
+++ b/src/ride/ride_unfold_methods.jl
@@ -0,0 +1,66 @@
+function unfold_pattern_matching(latencies_df, data_residuals_continous, c_erp, evts, cfg)
+ #epoch residue
+ evts_s = @subset(evts, :event .== 'S')
+ data_residuals_epoched, times = Unfold.epoch(
+ data = data_residuals_continous,
+ tbl = evts_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )
+ n, data_residuals_epoched = Unfold.drop_missing_epochs(evts_s, data_residuals_epoched)
+
+ xc, result, onset = findxcorrpeak(data_residuals_epoched[1, :, :], c_erp)
+
+ for (i, row) in enumerate(eachrow(latencies_df))
+ if (row.fixed)
+ continue
+ end
+ row.latency = result[i]
+ end
+
+ return latencies_df, xc, onset
+end
+
+function unfold_decomposition(data, evts_with_c, cfg)
+ #unfold deconvolution; TODO: make the fit more general, i.e. let the user provide the model structure
+ m = fit(
+ UnfoldModel,
+ [
+ 'S' => (@formula(0 ~ 1), firbasis(cfg.s_range, cfg.sfreq, "")),
+ 'R' => (@formula(0 ~ 1), firbasis(cfg.r_range, cfg.sfreq, "")),
+ 'C' => (
+ @formula(0 ~ 1),
+ firbasis(c_range_adjusted(cfg.c_range), cfg.sfreq, ""),
+ ),
+ ],
+ evts_with_c,
+ data,
+ )
+ c_table = coeftable(m)
+ s_erp = Matrix{Float64}(
+ undef,
+ size(data, 1),
+ size(@subset(c_table, :eventname .== 'S', :channel .== 1), 1),
+ )
+ r_erp = Matrix{Float64}(
+ undef,
+ size(data, 1),
+ size(@subset(c_table, :eventname .== 'R', :channel .== 1), 1),
+ )
+ c_erp = Matrix{Float64}(
+ undef,
+ size(data, 1),
+ size(@subset(c_table, :eventname .== 'C', :channel .== 1), 1),
+ )
+ for i in range(1, size(data, 1))
+ s_erp[i, :] = @subset(c_table, :eventname .== 'S', :channel .== i).estimate
+ r_erp[i, :] = @subset(c_table, :eventname .== 'R', :channel .== i).estimate
+ c_erp[i, :] = @subset(c_table, :eventname .== 'C', :channel .== i).estimate
+ end
+
+ yhat = predict(m, exclude_basis = 'C', overlap = true)
+ y = data
+ residuals_without_SR = Unfold._residuals(UnfoldModel, yhat, y)
+
+ return s_erp, r_erp, c_erp, residuals_without_SR
+end
\ No newline at end of file
diff --git a/test/Project.toml b/test/Project.toml
index 0c36332..2d94635 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,2 +1,21 @@
[deps]
+DSP = "717857b8-e6f2-59f4-9121-6e50c889abd2"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964"
+Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
+HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a"
+Peaks = "18e31ff7-3703-566c-8e60-38913d67486b"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+SignalAnalysis = "df1fea92-c066-49dd-8b36-eace3378ea47"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+Unfold = "181c99d8-e21b-4ff3-b70b-c233eddec679"
+UnfoldSim = "ed8ae6d2-84d3-44c6-ab46-0baf21700804"
+UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
+
+[sources]
+UnfoldSim = {rev = "v4.0", url = "https://github.com/unfoldtoolbox/UnfoldSim.jl"}
\ No newline at end of file
diff --git a/test/runtests.jl b/test/runtests.jl
index d04bce8..497bc8e 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,5 +1,20 @@
using UnfoldRIDE
using Test
+using Unfold
+using UnfoldSim
+using Statistics
+using SignalAnalysis
+using DataFrames
+using DataFramesMeta
+using Distributions
+using Random
+using Parameters
+using DSP
+using FFTW
+using Peaks
+using UnicodePlots
+using StableRNGs
+using LinearAlgebra
#=
Don't add your tests to runtests.jl. Instead, create files named
@@ -18,4 +33,4 @@ for (root, dirs, files) in walkdir(@__DIR__)
include(file)
end
end
-end
+end
\ No newline at end of file
diff --git a/test/simulate_test_data.jl b/test/simulate_test_data.jl
new file mode 100644
index 0000000..490d23a
--- /dev/null
+++ b/test/simulate_test_data.jl
@@ -0,0 +1,202 @@
+using UnfoldSim
+using Random
+using Unfold
+using StableRNGs
+using Parameters
+using HDF5
+using DataFrames
+using DataFramesMeta
+using Statistics
+using DSP
+
+@with_kw mutable struct simulation_inputs
+ rng::AbstractRNG = MersenneTwister(1234)
+ noise::AbstractNoise = NoNoise()
+ s_width::Int = 0
+ s_offset::Int = 200
+ s_beta::Int = 1
+ s_continous::Int = 1
+ c_width::Int = 30
+ c_offset::Int = 30
+ c_beta::Int = 5
+ r_width::Int = 60
+ r_offset::Int = 15
+ r_beta::Int = 5
+ r_continous::Int = 1
+end
+
+@with_kw struct SequenceOnset <: AbstractOnset
+ stimulus_onset::AbstractOnset
+ components_onset::Vector{AbstractOnset}
+end
+
+function UnfoldSim.simulate_onsets(rng, onset::SequenceOnset, simulation::Simulation)
+ #calculate stimulus onsets
+ stimulus_onsets =
+ simulate_interonset_distances(rng, onset.stimulus_onset, simulation.design)
+ stimulus_offset_accumulated = accumulate(+, stimulus_onsets, dims = 1, init = 0)
+
+ #calculate component offsets
+ components_onsets = Vector{Vector{Int}}()
+ for obj in onset.components_onset
+ Random.seed!(rng, rand(rng, 1:10000))
+ push!(components_onsets, simulate_interonset_distances(rng, obj, simulation.design))
+ end
+
+ #combine the stimulus offsets and component offsets into one vector
+ result = Vector{Int}()
+ for i in axes(stimulus_offset_accumulated, 1)
+ current_offset = stimulus_offset_accumulated[i]
+ push!(result, current_offset)
+ for component_onsets in components_onsets
+ push!(result, current_offset + component_onsets[i])
+ end
+ end
+
+ #cut result to the design size
+ result = result[1:size(simulation.design)]
+ return result
+end
+
+function simulate_default_plus_clean(simulation_inputs = simulation_inputs())
+ data, evts = default_sequence_design(simulation_inputs)
+
+ clean_inputs = deepcopy(simulation_inputs)
+ clean_inputs.s_offset = clean_inputs.s_offset + Int(round(clean_inputs.s_width / 2))
+ clean_inputs.s_width = 0
+ clean_inputs.r_offset = clean_inputs.r_offset + Int(round(clean_inputs.r_width / 2))
+ clean_inputs.r_width = 0
+ clean_inputs.c_offset = clean_inputs.c_offset + Int(round(clean_inputs.c_width / 2))
+ clean_inputs.c_width = 0
+ data_clean, evts_clean = default_sequence_design(clean_inputs)
+
+ s_clean_inputs = deepcopy(clean_inputs)
+ s_clean_inputs.r_beta = 0
+ s_clean_inputs.r_continous = 0
+ s_clean_inputs.c_beta = 0
+ data_clean_s, n = default_sequence_design(s_clean_inputs)
+
+ r_clean_inputs = deepcopy(clean_inputs)
+ r_clean_inputs.s_beta = 0
+ r_clean_inputs.s_continous = 0
+ r_clean_inputs.c_beta = 0
+ data_clean_r, n = default_sequence_design(r_clean_inputs)
+
+ c_clean_inputs = deepcopy(clean_inputs)
+ c_clean_inputs.s_beta = 0
+ c_clean_inputs.s_continous = 0
+ c_clean_inputs.r_beta = 0
+ c_clean_inputs.r_continous = 0
+ data_clean_c, n = default_sequence_design(c_clean_inputs)
+
+ return data, evts, data_clean, evts_clean, data_clean_s, data_clean_r, data_clean_c
+end
+
+function default_sequence_design(simulation_inputs = simulation_inputs())
+ # Define the design
+ design =
+ SingleSubjectDesign(;
+ conditions = Dict(
+ :condition => ["car", "face"],
+ :continuous => range(0, 1, length = 10),
+ ),
+ ) |> x -> RepeatDesign(x, 4)
+
+ sequence_design = SequenceDesign(design, "SCR")
+
+ # Define the components
+ s_component_p = LinearModelComponent(;
+ basis = p100(),
+ formula = @formula(0 ~ 1),
+ β = [simulation_inputs.s_beta],
+ )
+
+ s_component_n = LinearModelComponent(;
+ basis = n170(),
+ formula = @formula(0 ~ 1 + condition),
+ β = [simulation_inputs.s_beta, simulation_inputs.s_continous],
+ )
+
+ r_component = LinearModelComponent(;
+ basis = p300(),
+ formula = @formula(0 ~ 1 + continuous),
+ β = [simulation_inputs.r_beta, simulation_inputs.r_continous],
+ )
+
+ c_component = LinearModelComponent(;
+ basis = p100(), #vcat(p100().*0.3,zeros(7))+vcat(zeros(7),p100()), #
+ formula = @formula(0 ~ 1),
+ β = [simulation_inputs.c_beta],
+ )
+
+ onsetStimulus =
+ UniformOnset(width = simulation_inputs.s_width, offset = simulation_inputs.s_offset)
+
+ onsetC =
+ UniformOnset(width = simulation_inputs.c_width, offset = simulation_inputs.c_offset)
+
+ onsetR =
+ UniformOnset(width = simulation_inputs.r_width, offset = simulation_inputs.r_offset)
+
+ sequence_onset = SequenceOnset(onsetStimulus, [onsetC, onsetR])
+
+ components = Dict(
+ 'S' => [s_component_p, s_component_n],
+ 'C' => [c_component],
+ 'R' => [r_component],
+ )
+
+ data, evts = simulate(
+ simulation_inputs.rng,
+ sequence_design,
+ components,
+ sequence_onset,
+ simulation_inputs.noise,
+ )
+
+ return data, evts
+end
+
+function save_to_hdf5_ride_format(
+ filepath,
+ data,
+ evts,
+ epoch_range,
+ epoch_char,
+ reaction_char,
+ sfreq,
+)
+ evts_epoch_temp = @subset(evts, :event .== epoch_char)
+ data_epoched_temp, times =
+ Unfold.epoch(data = data, tbl = evts_epoch_temp, τ = epoch_range, sfreq = sfreq)
+ evts_epoch, data_epoched =
+ Unfold.drop_missing_epochs(evts_epoch_temp, data_epoched_temp)
+
+ #grab the reaction times from the epoched data
+ evts_r = @subset(evts, :event .== reaction_char)[!, :latency][1:size(data_epoched, 3)]
+ reaction_times = evts_r - evts_epoch[!, :latency]
+
+ #matlab_ride format: Matrix{Float64} with TimeStep:Channel:Trial
+ ride_matrix =
+ zeros(Float64, size(data_epoched, 2), size(data_epoched, 1), size(data_epoched, 3))
+
+ #fill the ride_matrix with the data
+ for x in axes(ride_matrix, 1)
+ for y in axes(ride_matrix, 2)
+ for z in axes(ride_matrix, 3)
+ ride_matrix[x, y, z] = data_epoched[y, x, z]
+ end
+ end
+ end
+
+ #todo maybe create chanlocs and save them here
+
+ h5open(filepath, "w") do file
+ # Save the 3D array
+ write(file, "dataset_data", ride_matrix)
+
+ # Save the vector rt
+ write(file, "dataset_rt", Float64.(reaction_times))
+ end
+
+end
\ No newline at end of file
diff --git a/test/test-algorithms.jl b/test/test-algorithms.jl
new file mode 100644
index 0000000..4581a58
--- /dev/null
+++ b/test/test-algorithms.jl
@@ -0,0 +1,236 @@
+include("./simulate_test_data.jl")
+using LinearAlgebra
+
+@testset "runner-unfold-ride" begin
+ #simulate data
+ begin
+ sim_inputs = simulation_inputs()
+ sim_inputs.noise = PinkNoise(; noiselevel = 1)
+ data, evts, data_clean, evts_clean, data_clean_s, data_clean_r, data_clean_c =
+ simulate_default_plus_clean(sim_inputs)
+ end
+
+ begin
+ #ENV["JULIA_DEBUG"] = "UnfoldRIDE"
+ #config for ride algorithm
+ cfg = RideConfig(
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4], # change to -0.4 , 0.4 or something because it's attached to the latency of C
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 5,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = false,
+ )
+
+ #remove the C events from the evts table, these will be estimated by the ride algorithm
+ evts_without_c = @subset(evts, :event .!= 'C')
+
+ #run the ride algorithm
+ results = ride_algorithm(UnfoldMode, data, evts_without_c, cfg)[1]
+ end
+
+ # calculate and plot clean erps from the simulated data
+ # these represent the optimal result from the algorithm
+ begin
+ evts_clean_s = @subset(evts_clean, :event .== 'S')
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_s,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_s = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_c,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_c = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_r,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_r = mean(data_epoched_clean, dims = 3)
+ end
+
+ #compare the clean erps to the estimated erps
+ begin
+ s_xcorr = xcorr(normalize(erp_clean_s[1, :, 1]), normalize(results.s_erp))
+ s_max = findmax(s_xcorr)
+ @test s_max[1] > 0.9
+
+ r_xcorr = xcorr(normalize(erp_clean_r[1, :, 1]), normalize(results.r_erp))
+ r_max = findmax(r_xcorr)
+ @test r_max[1] > 0.9
+
+ c_xcorr = xcorr(normalize(erp_clean_c[1, :, 1]), normalize(results.c_erp))
+ c_max = findmax(c_xcorr)
+ @test c_max[1] > 0.9
+ end
+end
+
+@testset "runner-unfold-ride" begin
+ #simulate data
+ begin
+ sim_inputs = simulation_inputs()
+ sim_inputs.noise = PinkNoise(; noiselevel = 1)
+ data, evts, data_clean, evts_clean, data_clean_s, data_clean_r, data_clean_c =
+ simulate_default_plus_clean(sim_inputs)
+ end
+
+ begin
+ #ENV["JULIA_DEBUG"] = "UnfoldRIDE"
+ #config for ride algorithm
+ cfg = RideConfig(
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4], # change to -0.4 , 0.4 or something because it's attached to the latency of C
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 5,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = false,
+ )
+
+ #remove the C events from the evts table, these will be estimated by the ride algorithm
+ evts_without_c = @subset(evts, :event .!= 'C')
+
+ #run the ride algorithm
+ results = ride_algorithm(ClassicMode, data, evts_without_c, cfg)[1]
+ end
+
+ # calculate and plot clean erps from the simulated data
+ # these represent the optimal result from the algorithm
+ begin
+ evts_clean_s = @subset(evts_clean, :event .== 'S')
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_s,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_s = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_c,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_c = mean(data_epoched_clean, dims = 3)
+
+ data_epoched_clean = Unfold.epoch(
+ data = data_clean_r,
+ tbl = evts_clean_s,
+ τ = cfg.epoch_range,
+ sfreq = cfg.sfreq,
+ )[1]
+ n, data_epoched_clean = Unfold.drop_missing_epochs(evts_clean_s, data_epoched_clean)
+ erp_clean_r = mean(data_epoched_clean, dims = 3)
+ end
+
+ #compare the clean erps to the estimated erps
+ begin
+ s_xcorr = xcorr(normalize(erp_clean_s[1, :, 1]), normalize(results.s_erp))
+ s_max = findmax(s_xcorr)
+ @test s_max[1] > 0.9
+
+ r_xcorr = xcorr(normalize(erp_clean_r[1, :, 1]), normalize(results.r_erp))
+ r_max = findmax(r_xcorr)
+ @test r_max[1] > 0.9
+
+ c_xcorr = xcorr(normalize(erp_clean_c[1, :, 1]), normalize(results.c_erp))
+ c_max = findmax(c_xcorr)
+ @test c_max[1] > 0.9
+ end
+end
+
+@testset "alrogithm-keywordargs" begin
+ #simulate data
+ begin
+ sim_inputs = simulation_inputs()
+ sim_inputs.noise = PinkNoise(; noiselevel = 1)
+ data, evts, data_clean, evts_clean, data_clean_s, data_clean_r, data_clean_c =
+ simulate_default_plus_clean(sim_inputs)
+ end
+
+ begin
+ #ENV["JULIA_DEBUG"] = "UnfoldRIDE"
+ #config for ride algorithm
+ cfg = RideConfig(
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4], # change to -0.4 , 0.4 or something because it's attached to the latency of C
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 5,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = false,
+ )
+
+ #remove the C events from the evts table, these will be estimated by the ride algorithm
+ evts_without_c = @subset(evts, :event .!= 'C')
+
+ #run the ride algorithm
+ results = ride_algorithm(ClassicMode, data, evts_without_c, cfg)[1]
+
+ #run the ride algorithm with keyword arguments
+ results_kw = ride_algorithm(
+ ClassicMode,
+ data,
+ evts_without_c;
+ sfreq = 100,
+ s_range = [-0.2, 0.4],
+ r_range = [0, 0.8],
+ c_range = [-0.4, 0.4],
+ c_estimation_range = [-0.1, 0.9],
+ epoch_range = [-0.3, 1.6],
+ iteration_limit = 5,
+ heuristic1 = true,
+ heuristic2 = true,
+ heuristic3 = true,
+ save_interim_results = false,
+ )[1]
+ @test results.c_erp == results_kw.c_erp
+ @test results.c_latencies == results_kw.c_latencies
+ end
+end
\ No newline at end of file
diff --git a/test/test-basic-test.jl b/test/test-basic-test.jl
deleted file mode 100644
index f3fc801..0000000
--- a/test/test-basic-test.jl
+++ /dev/null
@@ -1,3 +0,0 @@
-@testset "UnfoldRIDE.jl" begin
- @test UnfoldRIDE.hello_world() == "Hello, World!"
-end
diff --git a/test/test-ride_original_methods.jl b/test/test-ride_original_methods.jl
new file mode 100644
index 0000000..e24b515
--- /dev/null
+++ b/test/test-ride_original_methods.jl
@@ -0,0 +1,62 @@
+include("../src/ride/ride_classic_methods.jl")
+using UnicodePlots
+
+function createTestData()
+ design =
+ SingleSubjectDesign(; conditions = Dict(:condA => ["LevelA"])) |>
+ x -> RepeatDesign(x, 5)
+ p1 = LinearModelComponent(; basis = hanning(100), formula = @formula(0 ~ 1), β = [1])
+ onset = UniformOnset(width = 0, offset = 200)
+ data, evts = simulate(MersenneTwister(1), design, [p1], onset)
+ return data, evts
+end
+
+@testset "ride_original_methods.jl" begin
+ @testset "subtract_to_data1" begin
+ sfreq = 100
+
+ data = reshape(
+ vcat(zeros(100), hanning(100), zeros(100), hanning(100), zeros(100)),
+ (1, :),
+ )
+ evts = DataFrame(:event => ['B', 'B'], :latency => [101, 301])
+ range_test = [0.0, 1.0]
+
+ erp_to_subtract = reshape(hanning(100), (1, :, 1))
+
+ result_zero = subtract_to_data(data, [(evts, erp_to_subtract, range_test)], sfreq)
+
+ display(lineplot(data[1, :], title = "data"))
+ display(lineplot(erp_to_subtract[1, :], title = "erp_to_subtract"))
+ display(
+ lineplot(
+ result_zero[1, :],
+ title = "result after subtraction (should be zeros)",
+ ),
+ )
+
+ @test result_zero[1, :] == zeros(length(result_zero[1, :]))
+ end
+
+ @testset "subtract_to_data2" begin
+ sfreq = 100
+ data, evts = createTestData()
+ range_test = [0.0, 1.0]
+ data = reshape(data, (1, :))
+
+ erp_to_subtract = reshape(hanning(100), (1, :, 1))
+
+ result_zero = subtract_to_data(data, [(evts, erp_to_subtract, range_test)], sfreq)
+
+ display(lineplot(data[1, :], title = "data"))
+ display(lineplot(erp_to_subtract[1, :], title = "erp_to_subtract"))
+ display(
+ lineplot(
+ result_zero[1, :],
+ title = "result after subtraction (should be zeros)",
+ ),
+ )
+
+ @test result_zero[1, :] == zeros(length(result_zero[1, :]))
+ end
+end
\ No newline at end of file
diff --git a/test/test-ride_shared_methods.jl b/test/test-ride_shared_methods.jl
new file mode 100644
index 0000000..4a7b5fe
--- /dev/null
+++ b/test/test-ride_shared_methods.jl
@@ -0,0 +1,131 @@
+include("../src/ride/ride_shared_methods.jl")
+
+@testset "ride_shared_methods.jl" begin
+
+    @testset "findxcorrpeak" begin
+        d = UnfoldSim.pad_array(hanning(10), -35, 0)
+        kernel = hanning(20)
+
+
+        f = lineplot(d, title = "data + kernel")
+        lineplot!(f, kernel)
+        display(f)
+        xc, m, onset = findxcorrpeak(d, kernel)
+        display(lineplot(xc[1], title = "xcorr"))
+
+        f = lineplot(d, title = "data + kernel corrected")
+        lineplot!(f, vcat(zeros(m[1]), kernel))
+        display(f)
+
+
+        @test findxcorrpeak(d, kernel)[2] == [30]
+        @test findxcorrpeak(d, kernel; window = true)[2] == [30]
+    end
+
+    ## test heuristic1
+    @testset "heuristic1" begin
+        latencies_df = DataFrame(latency = [75, 33], fixed = [false, false])
+        latencies_df_old = DataFrame(latency = [70, 30], fixed = [false, false])
+        latencies_df_old_old = DataFrame(latency = [50, 40], fixed = [false, false])
+
+        heuristic1_monoton_latency_changes!(
+            latencies_df,
+            latencies_df_old,
+            latencies_df_old_old,
+        )
+
+        @test latencies_df.latency == [75, 30]
+        @test latencies_df.fixed == [false, true]
+    end
+
+    ## test heuristic2
+    @testset "heuristic2" begin
+        epoch1 = reshape(vcat(zeros(100), hanning(100) .* -1), (1, :, 1))
+        epoch2 = reshape(vcat(zeros(100), hanning(100)), (1, :, 1))
+        data_epoched = cat(epoch1, epoch2, dims = 3)
+
+        latencies_df = DataFrame(latency = [0, 100], fixed = [false, false])
+        latencies_df_old = DataFrame(latency = [50, 60], fixed = [false, false])
+
+        xc, xc_values = findxcorrpeak(data_epoched[1, :, :], hanning(100))
+        latencies_df_old = copy(latencies_df)
+        latencies_df.latency = copy(xc_values)
+
+        xc = [hanning(100) .* -1, hanning(100)]
+
+        rng = MersenneTwister(12345)
+        heuristic2_randomize_latency_on_convex_xcorr!(
+            latencies_df,
+            latencies_df_old,
+            xc,
+            rng,
+        )
+
+        #check that latencies_df.latency was randomized during the heuristic
+        @test latencies_df.latency != xc_values
+        #after randomization, the latency should be fixed
+        @test latencies_df.fixed == [true, false]
+    end
+
+    ## test heuristic3
+    @testset "heuristic3" begin
+        cfg = RideConfig(
+            sfreq = 100,
+            c_range = [-0.4, 0.4],
+            s_range = [0, 0],
+            r_range = [0, 0],
+            c_estimation_range = [0.2, 1.2],
+            epoch_range = [-0.3, 1.6],
+        )
+        #identical epochs with perfect match at 100 and subpar match at 300
+        epoch1 = reshape(
+            vcat(zeros(100), hanning(100), zeros(100), hanning(100) .* 0.9, zeros(100)),
+            (1, :, 1),
+        )
+        epoch2 = reshape(
+            vcat(zeros(100), hanning(100), zeros(100), hanning(100) .* 0.9, zeros(100)),
+            (1, :, 1),
+        )
+        data_epoched = cat(epoch1, epoch2, dims = 3)
+
+        #same latency for both epochs
+        latencies_df = DataFrame(latency = [100, 100], fixed = [false, false])
+        #201 is closer to the subpar 300 peak in the previous latencies, should trigger heuristic
+        latencies_df_old = DataFrame(latency = [201, 200], fixed = [false, false])
+        xc, xc_values, onset = findxcorrpeak(data_epoched[1, :, :], hanning(100))
+
+        heuristic3_pick_closest_xcorr_peak!(
+            latencies_df,
+            latencies_df_old,
+            xc;
+            equality_threshold = 0.8,
+            onset = onset,
+        )
+
+        @test latencies_df.latency == [300, 100]
+        @test latencies_df.fixed == [false, false]
+    end
+
+    #test dspfilter
+    @testset "dspfilter" begin
+        xcorr_max = Float64[]
+        xcorr_max_noisy = Float64[]
+        for i = 1:10000
+            data = vcat(zeros(100), hanning(100), hanning(100) .* -1)
+
+            noise = PinkNoise(; noiselevel = 0.1)
+            data_noisy = copy(data)
+            UnfoldSim.add_noise!(MersenneTwister(i), noise, data_noisy)
+
+            data_filtered = dspfilter(data_noisy, 5, 100)
+
+            xcorr_result = xcorr(normalize(data), normalize(data_filtered); padmode = :none)
+            push!(xcorr_max, findmax(xcorr_result)[1])
+
+            xcorr_noisy = xcorr(normalize(data), normalize(data_noisy); padmode = :none)
+            push!(xcorr_max_noisy, findmax(xcorr_noisy)[1])
+        end
+        @test all(xcorr_max .> 0.9)
+        @test all(xcorr_max_noisy .< xcorr_max)
+    end
+end
\ No newline at end of file