 <script defer src="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.16.3/contrib/auto-render.min.js" onload="renderMathInElement(document.body);"></script>
 
 <style>
+
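+/* Flex row for the task image and text; wraps to a single column on narrow screens */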
+.task-section {
+    display: flex;
+    flex-wrap: wrap;
+    gap: 20px;
+    align-items: flex-start;
+    margin-top: 10px;
+}
+
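+/* Image column: grows and shrinks from a 300px basis, capped at 40% of the row width */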
+.task-image-wrapper {
+    flex: 1 1 300px;
+    max-width: 40%;
+}
+
+.task-image-wrapper img {
+    width: 100%;
+    height: auto;
+    border-radius: 8px;
+    border: 1px solid #ddd;
+}
+
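+/* Text column: takes about 55% of the row, never narrower than 250px */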
+.task-text {
+    flex: 1 1 55%;
+    min-width: 250px;
+}
+
+
 body {
     font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
     background-color: #f9f9f9;
@@ -277,10 +304,17 @@ <h2>Abstract</h2>
 </p>
 
 <h2>Task Definition</h2>
-<img src="assets/images/UOD.svg" alt="UOD Example" class="task-image" width="400" height="200" loading="lazy">
-<p>
-    <strong>Unobserved Object Detection (UOD)</strong> refers to inferring objects beyond direct view—outside the frame or occluded. We explore this in 2D (partial views), 2.5D (with depth), and full 3D scene inference.
-</p>
+<div class="task-section">
+    <div class="task-image-wrapper">
+        <img src="assets/images/UOD.svg" alt="UOD Example" loading="lazy" />
+    </div>
+    <div class="task-text">
+        <p>
+            <strong>Unobserved Object Detection (UOD)</strong> refers to inferring objects beyond direct view—outside the frame or occluded. We explore this in 2D (partial views), 2.5D (with depth), and full 3D scene inference.
+        </p>
+    </div>
+</div>
+
 
 <h2>Paper</h2>
 <div class="resources">
@@ -299,14 +333,12 @@ <h2>Code</h2>
 </p>
 </div>
 
-
-
 <h2>Results</h2>
 <figure class="zoom-container">
     <img id="results-image" src="assets/images/results.png" alt="Detection Results" class="zoomable-image" loading="lazy">
     <figcaption>
         <strong>Figure:</strong> Each row shows spatial predictions by different models across object types (TV, fridge, sink, laptop).
-        Triangle = camera; dashed lines = frustum; star = ground truth. Warmer heatmap = higher object likelihood.
+        Triangle = camera; dashed lines = frustum; star = ground truth. Warmer heatmap = higher object likelihood. Probabilities are normalized across the heatmap.
     </figcaption>
 </figure>
 
@@ -321,8 +353,7 @@ <h2>Cite As</h2>
   title={{Believing is Seeing}: Unobserved Object Detection using Generative Models},
   author={Bhattacharjee, Subhransu S. and Campbell, Dylan and Shome, Rahul},
   booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-  year={2025},
-  note={To Appear}
+  year={2025}
 }</pre>
 </div>
 </div>