<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Primary Meta Tags -->
<!-- TODO: Replace with your paper title and author names -->
<meta name="title" content="World-VLA-Loop: Closed-Loop Learning of Video World Model and VLA Policy">
<!-- TODO: Write a compelling 150-160 character description of your research -->
<meta name="description" content="World-VLA-Loop is a framework that establishes a mutually beneficial co-evolving cycle between a robotic world model and a VLA policy to enable efficient reinforcement learning with minimal physical interaction.">
<!-- TODO: Add 5-10 relevant keywords for your research area -->
<meta name="keywords" content="Video Generation, World Model, Robotics, VLA, Reinforcement Learning">
<!-- TODO: List all authors -->
<meta name="author" content="Xiaokang Liu, Zechen Bai, Hai Ci, Kevin Yuchen Ma, Mike Zheng Shou">
<meta name="robots" content="index, follow">
<meta name="language" content="English">
<!-- Open Graph / Facebook -->
<meta property="og:type" content="article">
<!-- TODO: Replace with your institution or lab name -->
<meta property="og:site_name" content="Show Lab, National University of Singapore">
<!-- TODO: Same as paper title above -->
<meta property="og:title" content="World-VLA-Loop: Closed-Loop Learning of Video World Model and VLA Policy">
<!-- TODO: Same as description above -->
<meta property="og:description" content="World-VLA-Loop is a framework that establishes a mutually beneficial co-evolving cycle between a robotic world model and a VLA policy to enable efficient reinforcement learning with minimal physical interaction.">
<!-- TODO: Replace with your actual website URL -->
<meta property="og:url" content="https://hiskiv.github.io/World-VLA-Loop/">
<!-- TODO: Create a 1200x630px preview image and update path -->
<meta property="og:image" content="https://imgur.com/a/k6raYAC">
<meta property="og:image:width" content="1200">
<meta property="og:image:height" content="630">
<meta property="og:image:alt" content="PAPER_TITLE - Research Preview">
<meta property="article:published_time" content="2024-01-01T00:00:00.000Z">
<meta property="article:author" content="FIRST_AUTHOR_NAME">
<meta property="article:section" content="Research">
<meta property="article:tag" content="KEYWORD1">
<meta property="article:tag" content="KEYWORD2">
<!-- Twitter (skip) -->
<meta name="twitter:card" content="summary_large_image">
<!-- TODO: Replace with your lab/institution Twitter handle -->
<meta name="twitter:site" content="@YOUR_TWITTER_HANDLE">
<!-- TODO: Replace with first author's Twitter handle -->
<meta name="twitter:creator" content="@AUTHOR_TWITTER_HANDLE">
<!-- TODO: Same as paper title above -->
<meta name="twitter:title" content="PAPER_TITLE">
<!-- TODO: Same as description above -->
<meta name="twitter:description" content="BRIEF_DESCRIPTION_OF_YOUR_RESEARCH_CONTRIBUTION_AND_FINDINGS">
<!-- TODO: Same as social preview image above -->
<meta name="twitter:image" content="https://YOUR_DOMAIN.com/static/images/social_preview.png">
<meta name="twitter:image:alt" content="PAPER_TITLE - Research Preview">
<!-- Academic/Research Specific (skip) -->
<meta name="citation_title" content="PAPER_TITLE">
<meta name="citation_author" content="FIRST_AUTHOR_LAST, FIRST_AUTHOR_FIRST">
<meta name="citation_author" content="SECOND_AUTHOR_LAST, SECOND_AUTHOR_FIRST">
<meta name="citation_publication_date" content="2024">
<meta name="citation_conference_title" content="CONFERENCE_NAME">
<meta name="citation_pdf_url" content="https://YOUR_DOMAIN.com/static/pdfs/paper.pdf">
<!-- Additional SEO -->
<meta name="theme-color" content="#2563eb">
<meta name="msapplication-TileColor" content="#2563eb">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="default">
<!-- Preconnect for performance -->
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="preconnect" href="https://ajax.googleapis.com">
<link rel="preconnect" href="https://documentcloud.adobe.com">
<link rel="preconnect" href="https://cdn.jsdelivr.net">
<!-- TODO: Replace with your paper title and authors -->
<title>World-VLA-Loop: Closed-Loop Learning of Video World Model and VLA Policy - Xiaokang Liu et al. | Academic Research</title>
<!-- Favicon and App Icons -->
<link rel="icon" type="image/x-icon" href="static/images/favicon.ico">
<link rel="apple-touch-icon" href="static/images/favicon.ico">
<!-- Critical CSS - Load synchronously -->
<link rel="stylesheet" href="static/css/bulma.min.css">
<link rel="stylesheet" href="static/css/index.css">
<!-- Non-critical CSS - Load asynchronously -->
<link rel="preload" href="static/css/bulma-carousel.min.css" as="style" onload="this.onload=null;this.rel='stylesheet'">
<link rel="preload" href="static/css/bulma-slider.min.css" as="style" onload="this.onload=null;this.rel='stylesheet'">
<link rel="preload" href="static/css/fontawesome.all.min.css" as="style" onload="this.onload=null;this.rel='stylesheet'">
<link rel="preload" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css" as="style" onload="this.onload=null;this.rel='stylesheet'">
<!-- Fallback for browsers that don't support preload -->
<noscript>
<link rel="stylesheet" href="static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="static/css/bulma-slider.min.css">
<link rel="stylesheet" href="static/css/fontawesome.all.min.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
</noscript>
<!-- Fonts - Optimized loading -->
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet">
<!-- Defer non-critical JavaScript -->
<script defer src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script defer src="https://documentcloud.adobe.com/view-sdk/main.js"></script>
<script defer src="static/js/fontawesome.all.min.js"></script>
<script defer src="static/js/bulma-carousel.min.js"></script>
<script defer src="static/js/bulma-slider.min.js"></script>
<script defer src="static/js/index.js"></script>
<!-- Structured Data for Academic Papers -->
<script type="application/ld+json">
{
"@context": "https://schema.org",
"@type": "ScholarlyArticle",
"headline": "PAPER_TITLE",
"description": "BRIEF_DESCRIPTION_OF_YOUR_RESEARCH_CONTRIBUTION_AND_FINDINGS",
"author": [
{
"@type": "Person",
"name": "FIRST_AUTHOR_NAME",
"affiliation": {
"@type": "Organization",
"name": "INSTITUTION_NAME"
}
},
{
"@type": "Person",
"name": "SECOND_AUTHOR_NAME",
"affiliation": {
"@type": "Organization",
"name": "INSTITUTION_NAME"
}
}
],
"datePublished": "2024-01-01",
"publisher": {
"@type": "Organization",
"name": "CONFERENCE_OR_JOURNAL_NAME"
},
"url": "https://YOUR_DOMAIN.com/YOUR_PROJECT_PAGE",
"image": "https://YOUR_DOMAIN.com/static/images/social_preview.png",
"keywords": ["KEYWORD1", "KEYWORD2", "KEYWORD3", "machine learning", "computer vision"],
"abstract": "FULL_ABSTRACT_TEXT_HERE",
"citation": "BIBTEX_CITATION_HERE",
"isAccessibleForFree": true,
"license": "https://creativecommons.org/licenses/by/4.0/",
"mainEntity": {
"@type": "WebPage",
"@id": "https://YOUR_DOMAIN.com/YOUR_PROJECT_PAGE"
},
"about": [
{
"@type": "Thing",
"name": "RESEARCH_AREA_1"
},
{
"@type": "Thing",
"name": "RESEARCH_AREA_2"
}
]
}
</script>
<!-- Website/Organization Structured Data -->
<script type="application/ld+json">
{
"@context": "https://schema.org",
"@type": "Organization",
"name": "INSTITUTION_OR_LAB_NAME",
"url": "https://YOUR_INSTITUTION_WEBSITE.com",
"logo": "https://YOUR_DOMAIN.com/static/images/favicon.ico",
"sameAs": [
"https://twitter.com/YOUR_TWITTER_HANDLE",
"https://github.com/YOUR_GITHUB_USERNAME"
]
}
</script>
</head>
<body>
<!-- Scroll to Top Button -->
<button class="scroll-to-top" onclick="scrollToTop()" title="Scroll to top" aria-label="Scroll to top">
<i class="fas fa-chevron-up"></i>
</button>
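<!-- The scrollToTop() handler referenced by the button above is expected to live in static/js/index.js.
A minimal fallback sketch (an assumption, not the project's actual implementation); the deferred
static/js/index.js can later replace it with the real handler: -->
<script>
window.scrollToTop = window.scrollToTop || function () {
  // Smoothly scroll the window back to the top of the page.
  window.scrollTo({ top: 0, behavior: 'smooth' });
};
</script>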
<main id="main-content">
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<!-- TODO: Replace with your paper title -->
<h1 class="title is-1 publication-title">World-VLA-Loop : Closed-Loop Learning of Video World Model and VLA Policy</h1>
<div class="is-size-5 publication-authors">
<!-- TODO: Replace with your paper authors and their personal links -->
<span class="author-block">
<a href="https://scholar.google.com/citations?user=dAEHm8AAAAAJ&hl=en" target="_blank">Xiaokang Liu</a><sup>*</sup>,</span>
<span class="author-block">
<a href="https://scholar.google.com/citations?user=aIdQ8GwAAAAJ&hl=en" target="_blank">Zechen Bai</a><sup>*</sup>,</span>
<span class="author-block">
<a href="https://scholar.google.com/citations?user=GMrjppAAAAAJ&hl=en" target="_blank">Hai Ci</a>,</span>
<span class="author-block">
<a href="https://scholar.google.com/citations?user=_6xM_IcAAAAJ&hl=en" target="_blank">Kevin Yuchen Ma</a>,</span>
<span class="author-block">
<a href="https://scholar.google.com/citations?user=h1-3lSoAAAAJ&hl=en" target="_blank">Mike Zheng Shou</a>
</span>
</div>
<div class="is-size-5 publication-authors">
<!-- TODO: Replace with your institution and conference/journal info -->
<span class="author-block">Show Lab<br>National University of Singapore</span>
<!-- TODO: Remove this line if no equal contribution -->
<span class="eql-cntrb"><small><br><sup>*</sup>Indicates Equal Contribution</small></span>
</div>
<div class="column has-text-centered">
<div class="publication-links">
<!-- TODO: Update with your arXiv paper ID -->
<span class="link-block">
<a href="https://arxiv.org/pdf/<ARXIV PAPER ID>.pdf" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Paper</span>
</a>
</span>
<!-- TODO: Add your supplementary material PDF or remove this section -->
<!-- <span class="link-block">
<a href="static/pdfs/supplementary_material.pdf" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Supplementary</span>
</a>
</span> -->
<!-- TODO: Replace with your GitHub repository URL -->
<span class="link-block">
<a href="https://github.com/hiskiv/World-VLA-Loop-Code" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fab fa-github"></i>
</span>
<span>Code</span>
</a>
</span>
<!-- TODO: Update with your arXiv paper ID -->
<span class="link-block">
<a href="https://arxiv.org/abs/<ARXIV PAPER ID>" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="ai ai-arxiv"></i>
</span>
<span>arXiv</span>
</a>
</span>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Teaser figure-->
<section class="hero teaser">
<div class="container is-max-desktop">
<div class="hero-body">
<!-- Teaser original a video -->
<!-- <video poster="" id="tree" autoplay controls muted loop height="100%" preload="metadata"> -->
<!-- TODO: Add your video file path here -->
<!-- <source src="static/videos/banner_video.mp4" type="video/mp4"> -->
<!-- </video> -->
<!-- Teaser figure -->
<div style="text-align: center;">
<img src="figures/teaser.png" alt="World-VLA-Loop Teaser" style="width: 100%; max-width: 100%; height: auto;" />
</div>
<!-- TODO: Replace with your figure description -->
<h2 class="subtitle has-text-centered">
(a) Paradigms for world-model-based VLA reinforcement learning. Existing methods typically rely on reconstructing the environment in 3D or on training video world models that simulate the environment. To address the imprecise action-following of existing video-based simulators, we propose World-VLA-Loop, a closed-loop paradigm that jointly optimizes the world model and the VLA policy, iteratively improving the performance and grounding of both. (b) We show that the real-world policy success rate improves by 36.7% after two iterations of jointly optimizing the VLA model and the world model.
</h2>
</div>
</div>
</section>
<!-- End teaser figure -->
<!-- Paper abstract -->
<section class="section hero is-light">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">Abstract</h2>
<div class="content has-text-justified">
<!-- TODO: Replace with your paper abstract -->
<p>
Recent progress in robotic world models has leveraged video diffusion transformers to predict future observations conditioned on historical states and actions. While these models can simulate realistic visual outcomes, they often exhibit poor action-following precision, hindering their utility for downstream robotic learning. In this work, we introduce World-VLA-Loop, a closed-loop framework for the joint refinement of world models and Vision-Language-Action (VLA) policies. We propose a state-aware video world model that functions as a high-fidelity interactive simulator by jointly predicting future observations and reward signals. To enhance reliability, we introduce the Sans dataset, which incorporates near-success trajectories to improve action-outcome alignment within the world model. This framework enables closed-loop reinforcement learning (RL) post-training of VLA policies entirely within a virtual environment. Crucially, our approach facilitates a co-evolving cycle: failure rollouts generated by the VLA policy are iteratively fed back to refine the world model’s precision, which in turn enhances subsequent RL optimization. Evaluations across simulation and real-world tasks demonstrate that our framework significantly boosts VLA performance with minimal physical interaction, establishing a mutually beneficial relationship between world modeling and policy learning for general-purpose robotics.
</p>
</div>
</div>
</div>
</div>
</section>
<!-- End paper abstract -->
<section class="section hero is-small">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column is-full">
<div class="content">
<div class="level-set has-text-justified">
<p>
Our hypothesis is that probing methods, when done right, hold significant potential. Drawing inspiration
from binary code analysis, where dynamic approaches are more common than static ones, we believe that
running neural networks, i.e., probing, is a promising approach for weight
space learning. We begin with two preliminary experiments to test the quality and potential of probing
approaches:
</p>
<ol>
<li>Comparing a vanilla probing baseline to previous graph-based and mechanistic approaches. With enough probes: (a) vanilla probing performs better than graph approaches
that do not use probing; (b) graph approaches become equivalent to probing only when they also use probing features.
</li>
<li>Comparing learned probes and probes from randomly selected data. We show that synthetic probes are as effective as latent-optimized ones.</li>
</ol>
</div>
</div>
</div>
</div>
</div>
</section>
<section class="hero is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item">
<!-- Your image here -->
<img src="static/images/graph_vs_probing__space.png" alt="A table comparing probing to previous approaches"/>
<h2 class="subtitle has-text-centered">
<em><b>Vanilla Probing vs. Other Approaches.</b></em> Comparing a vanilla probing approach with previous
graph-based and mechanistic approaches (number of probes in brackets).
</h2>
</div>
<div class="item">
<!-- Your image here -->
<img src="static/images/synthetic_data_space.png" alt="Comparing synthetic data to learnt probes"/>
<h2 class="subtitle has-text-centered">
<em><b>Latent Optimized Probes vs. Synthetic Data as Probes.</b></em> Comparing learned probes and probes from randomly selected data.
</h2>
</div>
</div>
</div>
</div>
</section>
<section class="section hero is-small">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column is-full">
<div class="content">
<div class="level-set has-text-justified">
<p>
We propose <em>Deep Linear Probe Generators</em> (<strong>ProbeGen</strong>) for learning better probes. ProbeGen optimizes a
deep generator module limited to linear expressivity, which shares information between the
different probes. It then observes the responses from all probes and trains an MLP classifier on
them. Despite its simplicity, we demonstrate that it greatly enhances probing methods and also outperforms
other approaches by a large margin.
</p>
</div>
</div>
</div>
</div>
</div>
</section>
<section class="hero teaser">
<div class="container is-max-desktop">
<div class="hero-body">
<img src="static/images/ProbeGen_results.png" alt="Main results of our method ProbeGen"/>
</div>
</div>
</section>
<section class="section hero is-small">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column is-full">
<div class="content">
<div class="level-set has-text-justified">
<p>
ProbeGen represents each model as an ordered list of output
values based on carefully chosen probes. These representations often have semantic meaning, as
the output space of the model (here, image pixels or logits) is semantic by design.
</p>
</div>
</div>
</div>
</div>
</div>
</section>
<section class="section hero is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item">
<!-- Your image here -->
<img src="static/images/mnist_queries.png" alt="MNIST INR Representation visualization"/>
<h2 class="subtitle has-text-centered">
<em><b>MNIST INR Representations.</b></em> ProbeGen chooses object-centric locations suitable for this task,
while Vanilla Probing chooses locations scattered around the image, including pixels far outside the image.
</h2>
</div>
<div class="item">
<!-- Your image here -->
<img src="static/images/cifar_queries.png" alt="CIFAR10 Wild Park Representation visualization"/>
<h2 class="subtitle has-text-centered">
<em><b>CIFAR10 Wild Park Representations.</b></em> The values become more uniform as the accuracy of the models
decreases, and sharper as it increases. This suggests that ProbeGen uses some form of prediction
entropy in its classifier. We validate this by training a classifier that only takes the
entropy of each probe as its features, which already reaches a Kendall’s τ of 0.877.
</h2>
</div>
<div class="item">
<!-- Your image here -->
<img src="static/images/probes_comp_flat_space.png" alt="Comparing the probes learned from different algorithms"/>
<h2 class="subtitle has-text-centered">
<em><b>ProbeGen vs. Vanilla Probing Learned Probes.</b></em> Although neither is interpretable by humans,
it is clear that ProbeGen probes have much more structure than latent-optimized ones.
</h2>
</div>
</div>
</div>
</div>
</section>
<!-- Image carousel -->
<section class="hero is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item">
<!-- TODO: Replace with your research result images -->
<img src="static/images/carousel1.jpg" alt="First research result visualization" loading="lazy"/>
<!-- TODO: Replace with description of this result -->
<h2 class="subtitle has-text-centered">
First image description.
</h2>
</div>
<div class="item">
<!-- Your image here -->
<img src="static/images/carousel2.jpg" alt="Second research result visualization" loading="lazy"/>
<h2 class="subtitle has-text-centered">
Second image description.
</h2>
</div>
<div class="item">
<!-- Your image here -->
<img src="static/images/carousel3.jpg" alt="Third research result visualization" loading="lazy"/>
<h2 class="subtitle has-text-centered">
Third image description.
</h2>
</div>
<div class="item">
<!-- Your image here -->
<img src="static/images/carousel4.jpg" alt="Fourth research result visualization" loading="lazy"/>
<h2 class="subtitle has-text-centered">
Fourth image description.
</h2>
</div>
</div>
</div>
</div>
</section>
<!-- End image carousel -->
<!-- Youtube video -->
<section class="hero is-small is-light">
<div class="hero-body">
<div class="container">
<!-- Paper video. -->
<h2 class="title is-3">Video Presentation</h2>
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<div class="publication-video">
<!-- TODO: Replace with your YouTube video ID -->
<iframe src="https://www.youtube.com/embed/JkaxUblCGz0" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- End youtube video -->
<!-- Video carousel -->
<section class="hero is-small">
<div class="hero-body">
<div class="container">
<h2 class="title is-3">Another Carousel</h2>
<div id="results-carousel" class="carousel results-carousel">
<div class="item item-video1">
<!-- TODO: Add poster image for better preview -->
<video poster="" id="video1" controls muted loop height="100%" preload="metadata">
<!-- Your video file here -->
<source src="static/videos/carousel1.mp4" type="video/mp4">
</video>
</div>
<div class="item item-video2">
<!-- TODO: Add poster image for better preview -->
<video poster="" id="video2" controls muted loop height="100%" preload="metadata">
<!-- Your video file here -->
<source src="static/videos/carousel2.mp4" type="video/mp4">
</video>
</div>
<div class="item item-video3">
<!-- TODO: Add poster image for better preview -->
<video poster="" id="video3" controls muted loop height="100%" preload="metadata">
<!-- Your video file here -->
<source src="static/videos/carousel3.mp4" type="video/mp4">
</video>
</div>
</div>
</div>
</div>
</section>
<!-- End video carousel -->
<!-- Paper poster -->
<section class="hero is-small is-light">
<div class="hero-body">
<div class="container">
<h2 class="title">Poster</h2>
<!-- TODO: Replace with your poster PDF -->
<iframe src="static/pdfs/sample.pdf" width="100%" height="550">
</iframe>
</div>
</div>
</section>
<!--End paper poster -->
<!--BibTex citation -->
<section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<div class="bibtex-header">
<h2 class="title">BibTeX</h2>
<button class="copy-bibtex-btn" onclick="copyBibTeX()" title="Copy BibTeX to clipboard">
<i class="fas fa-copy"></i>
<span class="copy-text">Copy</span>
</button>
</div>
<pre id="bibtex-code"><code>@article{YourPaperKey2024,
title={Your Paper Title Here},
author={First Author and Second Author and Third Author},
journal={Conference/Journal Name},
year={2024},
url={https://your-domain.com/your-project-page}
}</code></pre>
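<!-- The copyBibTeX() handler used by the Copy button above is expected to be defined in static/js/index.js.
A minimal sketch of such a handler (the Clipboard API call and the temporary "Copied!" label are
assumptions for illustration, not the project's actual implementation): -->
<script>
window.copyBibTeX = window.copyBibTeX || function () {
  // Read the citation text from the <pre id="bibtex-code"> block above.
  var bibtex = document.getElementById('bibtex-code').innerText;
  // Copy it to the clipboard (requires a secure context, e.g. HTTPS).
  navigator.clipboard.writeText(bibtex).then(function () {
    // Briefly change the button label to give visual feedback.
    var label = document.querySelector('.copy-bibtex-btn .copy-text');
    if (label) {
      var original = label.textContent;
      label.textContent = 'Copied!';
      setTimeout(function () { label.textContent = original; }, 2000);
    }
  });
};
</script>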
</div>
</section>
<!--End BibTex citation -->
<footer class="footer">
<div class="container">
<div class="columns is-centered">
<div class="column is-8">
<div class="content">
<p>
This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a>, which was adapted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page.
You are free to borrow the source code of this website; we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/" target="_blank">Creative
Commons Attribution-ShareAlike 4.0 International License</a>.
</p>
</div>
</div>
</div>
</div>
</footer>
<!-- Statcounter tracking code -->
<!-- You can add a tracker to track page visits by creating an account at statcounter.com -->
<!-- End of Statcounter Code -->
</body>
</html>