
Commit 5ed30ff

Author: Greg Cohen (committed)
Added a few more datasets and fixed a tag error
1 parent a5915d7 commit 5ed30ff

File tree

9 files changed: +2198 -1 lines changed

datasets/DAVIS-RS-EVENT.md

Lines changed: 308 additions & 0 deletions
@@ -0,0 +1,308 @@
---
{
    "name": "DAVIS-RS-EVENT",
    "aliases": ["DRE"],
    "year": 2025,
    "modalities": ["Vision"],
    "sensors": ["DAVIS346"],
    "other_sensors": [],
    "category": "Intensity Reconstruction, Optical Flow, and Frame Fusion",
    "tags": ["Shutter Unrolling"],
    "description": "Event-based Shutter Unrolling Dataset",
    "dataset_properties": {
        "available_online": true,
        "has_real_data": true,
        "has_simulated_data": false,
        "has_ground_truth": false,
        "has_frames": true,
        "has_biases": false,
        "distribution_methods": ["Google Drive"],
        "file_formats": ["HDF5"],
        "availability_comment": "",
        "dataset_links": [
            { "name": "Google Drive", "url": "https://drive.google.com/drive/folders/1GhUP-dJIQbutI3uipeuGPMJmfA2u-ACR", "format": "HDF5", "available": true }
        ],
        "size_gb": 9.5,
        "size_type": "Compressed"
    },
    "paper": {
        "title": "Self-supervised Shutter Unrolling with Events",
        "doi": "10.1007/s11263-025-02364-z",
        "authors": ["Mingyuan Lin", "Yangguang Wang", "Xiang Zhang", "Boxin Shi", "Wen Yang", "Chu He", "Gui-song Xia", "Lei Yu"],
        "abstract": "Continuous-time Global Shutter Video Recovery (CGVR) faces a substantial challenge in recovering undistorted high frame-rate Global Shutter (GS) videos from distorted Rolling Shutter (RS) images. This problem is severely ill-posed due to the absence of temporal dynamic information within RS intra-frame scanlines and inter-frame exposures, particularly when prior knowledge about camera/object motions is unavailable. Commonly used artificial assumptions on scenes/motions and data-specific characteristics are prone to producing sub-optimal solutions in real-world scenarios. To address this challenge, we propose an event-based CGVR network within a self-supervised learning paradigm, i.e., SelfUnroll, and leverage the extremely high temporal resolution of event cameras to provide accurate inter/intra-frame dynamic information. Specifically, an Event-based Inter/intra-frame Compensator (E-IC) is proposed to predict the per-pixel dynamic between arbitrary time intervals, including the temporal transition and spatial translation. Exploring connections in terms of RS-RS, RS-GS, and GS-RS, we explicitly formulate mutual constraints with the proposed E-IC, resulting in supervisions without ground-truth GS images. Extensive evaluations over synthetic and real datasets demonstrate that the proposed method achieves state-of-the-art methods and shows remarkable performance for event-based RS2GS inversion in real-world scenarios. The dataset and code are available at https://w3un.github.io/selfunroll/.",
        "open_access": false
    },
    "citation_counts": [
        { "source": "crossref", "count": 0, "updated": "2025-09-12T16:25:21.707888" },
        { "source": "scholar", "count": 1, "updated": "2025-09-12T16:25:23.257051" }
    ],
    "links": [
        { "type": "project_page", "url": "https://w3un.github.io/selfunroll/" },
        { "type": "github_page", "url": "https://github.com/w3un/selfunroll_code" }
    ],
    "full_name": "",
    "additional_metadata": {
        "num_recordings": "100"
    },
    "referenced_papers": [
        { "doi": "10.1109/CVPR42600.2020.00258", "source": "crossref" },
        { "doi": "10.1109/CVPR.2010.5539932", "source": "crossref" },
        { "doi": "10.1109/CVPR.2019.00382", "source": "crossref" },
        { "doi": "10.1109/TPAMI.2019.2941941", "source": "crossref" },
        { "doi": "10.1109/JSSC.2014.2342715", "source": "crossref" },
        { "doi": "10.1109/ICCV51070.2023.01148", "source": "crossref" },
        { "doi": "10.36227/techrxiv.21300960.v1", "source": "crossref" },
        { "doi": "10.1109/CVPR52729.2023.01336", "source": "crossref" },
        { "doi": "10.1109/ICCV48922.2021.00419", "source": "crossref" },
        { "doi": "10.1109/ICCV48922.2021.00450", "source": "crossref" },
        { "doi": "10.1109/CVPR52688.2022.01705", "source": "crossref" },
        { "doi": "10.1145/358669.358692", "source": "crossref" },
        { "doi": "10.1109/ICCPhot.2012.6215213", "source": "crossref" },
        { "doi": "10.1109/CVPR52688.2022.01728", "source": "crossref" },
        { "doi": "10.1109/CVPR.2018.00938", "source": "crossref" },
        { "doi": "10.1109/CVPR.2019.00830", "source": "crossref" },
        { "doi": "10.1109/CVPR.2018.00504", "source": "crossref" },
        { "doi": "10.1109/CVPR52729.2023.00406", "source": "crossref" },
        { "doi": "10.1109/JSSC.2007.914337", "source": "crossref" },
        { "doi": "10.1007/978-3-030-58598-3_41", "source": "crossref" },
        { "doi": "10.1109/CVPR42600.2020.00598", "source": "crossref" },
        { "doi": "10.1109/ICCV.2017.478", "source": "crossref" },
        { "doi": "10.1109/CVPR.2017.244", "source": "crossref" },
        { "doi": "10.1109/ICCV.2017.37", "source": "crossref" },
        { "doi": "10.1007/978-3-031-72952-2_10", "source": "crossref" },
        { "doi": "10.1109/CVPR.2013.179", "source": "crossref" },
        { "doi": "10.1109/CVPR.2019.00698", "source": "crossref" },
        { "doi": "10.1109/WACV.2018.00104", "source": "crossref" },
        { "doi": "10.1109/ICCV.2017.101", "source": "crossref" },
        { "doi": "10.1109/ICCV.2019.00098", "source": "crossref" },
        { "doi": "10.1109/CVPR.2016.303", "source": "crossref" },
        { "doi": "10.1109/CVPR.2017.252", "source": "crossref" },
        { "doi": "10.1007/978-3-319-24574-4_28", "source": "crossref" },
        { "doi": "10.1109/CVPR.2016.445", "source": "crossref" },
        { "doi": "10.1109/CVPR52688.2022.01584", "source": "crossref" },
        { "doi": "10.1109/CVPR.2018.00931", "source": "crossref" },
        { "doi": "10.1109/CVPR46437.2021.01589", "source": "crossref" },
        { "doi": "10.1109/CVPR52688.2022.01723", "source": "crossref" },
        { "doi": "10.1007/978-3-030-58601-0_10", "source": "crossref" },
        { "doi": "10.1109/ICCV48922.2021.00258", "source": "crossref" },
        { "doi": "10.1007/978-3-031-20071-7_5", "source": "crossref" },
        { "doi": "10.1109/CVPR52688.2022.01724", "source": "crossref" },
        { "doi": "10.1016/j.cviu.2024.104094", "source": "crossref" },
        { "doi": "10.1109/CVPR46437.2021.00910", "source": "crossref" },
        { "doi": "10.1007/978-3-031-20071-7_14", "source": "crossref" },
        { "doi": "10.1109/CVPR52688.2022.01725", "source": "crossref" },
        { "doi": "10.15607/RSS.2018.XIV.062", "source": "crossref" },
        { "doi": "10.1109/ICCV.2017.244", "source": "crossref" },
        { "doi": "10.1109/ICCV.2017.108", "source": "crossref" },
        { "doi": "10.1109/CVPR.2019.00468", "source": "crossref" }
    ],
    "bibtex": {
        "pages": "3762\u20133780",
        "month": "jan",
        "year": 2025,
        "author": "Lin, Mingyuan and Wang, Yangguang and Zhang, Xiang and Shi, Boxin and Yang, Wen and He, Chu and Xia, Gui-song and Yu, Lei",
        "publisher": "Springer Science and Business Media LLC",
        "journal": "International Journal of Computer Vision",
        "number": "6",
        "doi": "10.1007/s11263-025-02364-z",
        "url": "http://dx.doi.org/10.1007/s11263-025-02364-z",
        "issn": "1573-1405",
        "volume": "133",
        "title": "Self-supervised Shutter Unrolling with Events",
        "type": "article",
        "key": "Lin_2025"
    }
}
---

# Dataset Details
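
The recordings are distributed as HDF5 files via the Google Drive link in the metadata above. The internal group and dataset names are not documented in this entry, so the following is only a minimal inspection sketch (the filename and any structure it reveals are assumptions, not part of the committed file):

```python
# Minimal sketch: walk one downloaded DAVIS-RS-EVENT HDF5 recording and print
# every dataset's path, shape, and dtype. "recording_000.h5" is a placeholder
# filename; the actual file names and layout come from the Google Drive folder.
import h5py

def summarize(name, obj):
    # Called for every object in the file; report only datasets (not groups).
    if isinstance(obj, h5py.Dataset):
        print(f"{name}: shape={obj.shape}, dtype={obj.dtype}")

with h5py.File("recording_000.h5", "r") as f:
    f.visititems(summarize)
```

Listing the tree this way is a safe first step before assuming where the event streams, rolling-shutter frames, or timestamps live inside each file.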

datasets/DVS-UP-Fall.md

Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
---
{
    "name": "DVS-UP-Fall",
    "aliases": [],
    "year": 2025,
    "modalities": ["Vision"],
    "sensors": ["V2E", "V2CE"],
    "other_sensors": [],
    "category": "Human-centric Recordings",
    "tags": ["Fall Detection"],
    "description": "Fall detection dataset",
    "dataset_properties": {
        "available_online": true,
        "has_real_data": false,
        "has_simulated_data": true,
        "has_ground_truth": true,
        "has_frames": true,
        "has_biases": false,
        "distribution_methods": ["Zenodo"],
        "file_formats": ["Numpy"],
        "availability_comment": "",
        "dataset_links": [
            { "name": "Zenodo", "url": "https://zenodo.org/records/15532612", "format": "Numpy", "available": true }
        ],
        "size_gb": 8.2,
        "size_type": "Compressed"
    },
    "paper": {
        "title": "",
        "doi": "",
        "authors": [],
        "abstract": "",
        "open_access": false
    },
    "citation_counts": [],
    "links": [
        { "type": "github_page", "url": "https://github.com/supungamlath/DVS-UP-Fall/tree/main" }
    ],
    "full_name": "",
    "additional_metadata": {}
}
---

# Dataset Description

The DVS-UP-Fall dataset is an event-based version of the publicly available UP-Fall Detection dataset, tailored for advancing fall detection research using neuromorphic vision sensors and spiking neural networks. The dataset was generated by converting the RGB video data to event streams using two leading event-conversion toolkits: v2e (video-to-events) and v2ce (video-to-continuous-events).

The dataset is structured into two main ZIP archives:

1. dvs-up-fall-v2e-dataset-160.zip: event data generated with the v2e toolkit.
2. dvs-up-fall-v2ce-dataset-160.zip: event data generated with the v2ce toolkit.

## Label Files

To support supervised learning and benchmarking, the following label files are provided:

- CompleteDataSet.csv: original UP-Fall labels, including IMU, EEG, and IR sensor data.
- labels_multiclass_w1.0.csv: multiclass activity labels for 1.0-second non-overlapping windows.
- labels_binary_w1.0.csv: binary (fall vs. no-fall) labels for 1.0-second non-overlapping windows.
- labels_multiclass_w0.5.csv: multiclass activity labels for 0.5-second windows.
- labels_binary_w0.5.csv: binary labels for 0.5-second windows.

This dataset supports both binary and multiclass activity classification using event-based data and is suitable for training and evaluating spiking neural networks (SNNs). All data is organized per subject, activity, trial, and camera, consistent with the original UP-Fall dataset structure.
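
As a rough starting point for working with this layout, the sketch below loads one converted event file and the corresponding 1.0-second window labels. The file names, array layout, and CSV columns are assumptions based on the description above, not documented specifics of the release:

```python
# Minimal sketch, assuming .npy event tensors extracted from one of the ZIP
# archives plus the provided binary label CSV; all paths below are placeholders
# following the per-subject/activity/trial/camera naming described above.
import numpy as np
import pandas as pd

events = np.load("dvs-up-fall-v2e-dataset-160/subject1_activity1_trial1_camera1.npy")
labels = pd.read_csv("labels_binary_w1.0.csv")

print("event array shape:", events.shape)  # exact layout depends on the conversion settings
print(labels.head())                       # inspect label columns before building a training loader
```

Checking the array shape and label columns first avoids hard-coding assumptions about window counts when pairing event windows with fall/no-fall targets.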
