@@ -174,6 +174,7 @@ <h2>Preprint</h2>
174174         <div class="trend-entry d-flex">
175175           <div class="trend-contents">
176176             <ul>
177+
177178             <li>TrustGen Team.
178179               <br> <b style="color:rgb(71, 71, 71)">"On the Trustworthiness of Generative Foundation Models – Guideline, Assessment, and Perspective"</b>
179180               <br>Arxiv 2025. <a href="https://arxiv.org/abs/2502.14296">[Paper]</a> <a href="https://trustgen.github.io/">[Project]</a> </li>
@@ -187,50 +188,48 @@ <h2>Preprint</h2>
187188               <br> <b style="color:rgb(71, 71, 71)">"AutoTrust: Benchmarking Trustworthiness in Large Vision Language Models for Autonomous Driving"</b>
188189               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2412.15206">[Paper]</a> <a href="https://taco-group.github.io/AutoTrust/">[Project]</a> </li>
189190
190-             <li>X. Xing, C. Qian, Y. Wang, H. Hua, K. Tian, Y. Zhou, Z. Tu
191-               <br> <b style="color:rgb(71, 71, 71)">"OpenEMMA: Open-Source Multimodal Model for End-to-End Autonomous Driving"</b>
192-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2412.15208">[Paper]</a> <a href="https://github.com/taco-group/OpenEMMA">[Code]</a> </li>
191+
193192
194193             <li>L. Li, J. Li, ..., Z. Tu, ..., Y. Zhao, Y. Dong
195194               <br> <b style="color:rgb(71, 71, 71)">"Political-llm: Large language models in political science"</b>
196195               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2412.06864">[Paper]</a> <a href="http://political-llm.org/">[Project]</a> </li>
197196
198-             <li>Z. Wang, J. Guo, J. Zhu, Y. Li, H. Huang, M. Chen, Z. Tu
199-               <br> <b style="color:rgb(71, 71, 71)">"SleeperMark: Towards Robust Watermark against Fine-Tuning Text-to-image Diffusion Models"</b>
200-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2412.04852">[Paper]</a> <a href="https://github.com/taco-group/SleeperMark">[Code]</a> </li>
197+ <!-- <li>Z. Wang, J. Guo, J. Zhu, Y. Li, H. Huang, M. Chen, Z. Tu-->
198+             <!-- <br> <b style="color:rgb(71, 71, 71)">"SleeperMark: Towards Robust Watermark against Fine-Tuning Text-to-image Diffusion Models"</b>-->
199+             <!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2412.04852">[Paper]</a> <a href="https://github.com/taco-group/SleeperMark">[Code]</a></li>-->
201200
202201             <li>Q. Zheng, Y. Fan, L. Huang, T. Zhu, J. Liu, Z. Hao, X. Shuo, C.J. Chen, X. Min, A. Bovik, Z. Tu
203202               <br> <b style="color:rgb(71, 71, 71)">"Video Quality Assessment: A Comprehensive Survey"</b>
204203               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2412.04508">[Paper]</a> <a href="https://github.com/taco-group/Video-Quality-Assessment-A-Comprehensive-Survey">[Code]</a> </li>
205204
206-             <li>H. Wang, Y. Zhang, R. Bai, Y. Zhao, S. Liu, Z. Tu
207-               <br> <b style="color:rgb(71, 71, 71)">"Edit Away and My Face Will not Stay: Personal Biometric Defense against Malicious Generative Editing"</b>
208-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2411.16832">[Paper]</a> <a href="https://github.com/taco-group/FaceLock">[Code]</a> </li>
205+ <!-- <li>H. Wang, Y. Zhang, R. Bai, Y. Zhao, S. Liu, Z. Tu-->
206+             <!-- <br> <b style="color:rgb(71, 71, 71)">"Edit Away and My Face Will not Stay: Personal Biometric Defense against Malicious Generative Editing"</b>-->
207+             <!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2411.16832">[Paper]</a> <a href="https://github.com/taco-group/FaceLock">[Code]</a></li>-->
209208
210209
211-             <li>S. Li, H. Gong, H. Dong, T. Yang, Z. Tu, Y. Zhao
212-               <br> <b style="color:rgb(71, 71, 71)">"DPU: Dynamic Prototype Updating for Multimodal Out-of-Distribution Detection"</b>
213-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2411.08227">[Paper]</a> <a href="https://github.com/lili0415/DPU-OOD-Detection">[Code]</a> </li>
210+ <!-- <li>S. Li, H. Gong, H. Dong, T. Yang, Z. Tu, Y. Zhao-->
211+             <!-- <br> <b style="color:rgb(71, 71, 71)">"DPU: Dynamic Prototype Updating for Multimodal Out-of-Distribution Detection"</b>-->
212+             <!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2411.08227">[Paper]</a> <a href="https://github.com/lili0415/DPU-OOD-Detection">[Code]</a></li>-->
214213
215-             <li>R. Li, P. Pan, B. Yang, D. Xu, S. Zhou, X. Zhang, Z. Li, A. Kadambi, Z. Wang, Z. Tu, Z. Fan
216-               <br> <b style="color:rgb(71, 71, 71)">"4K4DGen: Panoramic 4D Generation at 4K Resolution"</b>
217-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2406.13527">[Paper]</a> <a href="https://4k4dgen.github.io/">[Project]</a> </li>
214+ <!-- <li>R. Li, P. Pan, B. Yang, D. Xu, S. Zhou, X. Zhang, Z. Li, A. Kadambi, Z. Wang, Z. Tu, Z. Fan-->
215+             <!-- <br> <b style="color:rgb(71, 71, 71)">"4K4DGen: Panoramic 4D Generation at 4K Resolution"</b>-->
216+             <!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2406.13527">[Paper]</a> <a href="https://4k4dgen.github.io/">[Project]</a></li>-->
218217
219218             <li>T. Zhu, Q. Liu, F. Wang, Z. Tu, and M. Chen
220219               <br> <b style="color:rgb(71, 71, 71)">"Unraveling Cross-Modality Knowledge Conflict in Large Vision-Language Models"</b>
221220               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2410.03659">[Paper]</a> <a href="https://github.com/luka-group/vlm-knowledge-conflict">[Code]</a> </li>
222221
223-             <li>J. Li, X. Liu, B. Li, R. Xu, J. Li, H. Yu, and Z. Tu
224-               <br> <b style="color:rgb(71, 71, 71)">"CoMamba: Real-time Cooperative Perception Unlocked with State Space Models"</b>
225-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2409.10699">[Paper]</a> <a href="https://github.com/taco-group/CoMamba">[Code]</a> </li>
222+ <!-- <li>J. Li, X. Liu, B. Li, R. Xu, J. Li, H. Yu, and Z. Tu-->
223+             <!-- <br> <b style="color:rgb(71, 71, 71)">"CoMamba: Real-time Cooperative Perception Unlocked with State Space Models"</b>-->
224+             <!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2409.10699">[Paper]</a> <a href="https://github.com/taco-group/CoMamba">[Code]</a></li>-->
226225
227226<!-- <li>K. Mei, Z. Tu, M. Delbracio, H. Talebi, V.M. Patel, P. Milanfar-->
228227<!-- <br> <b style="color:rgb(71, 71, 71)">"Bigger is not Always Better: Scaling Properties of Latent Diffusion Models"</b>-->
229228<!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2404.01367">[Paper]</a> </li>-->
230229
231-             <li>B Li, J Li, X Liu, R Xu, Z Tu, J Guo, X Li, H Yu
232-               <br> <b style="color:rgb(71, 71, 71)">"V2X-DGW: Domain Generalization for Multi-agent Perception under Adverse Weather Conditions"</b>
233-               <br>Arxiv 2024. <a href="https://arxiv.org/abs/2403.11371">[Paper]</a> </li>
230+ <!-- <li>B Li, J Li, X Liu, R Xu, Z Tu, J Guo, X Li, H Yu-->
231+             <!-- <br> <b style="color:rgb(71, 71, 71)">"V2X-DGW: Domain Generalization for Multi-agent Perception under Adverse Weather Conditions"</b>-->
232+             <!-- <br>Arxiv 2024. <a href="https://arxiv.org/abs/2403.11371">[Paper]</a> </li>-->
234233             </ul>
235234           </div>
236235         </div>
@@ -377,6 +376,11 @@ <h3>
377376               <a href="https://github.com/taco-group/STAMP">[Code]</a>
378377             </li>
379378
379+             <li>X. Xing, C. Qian, Y. Wang, H. Hua, K. Tian, Y. Zhou, Z. Tu
380+               <br> <b style="color:rgb(71, 71, 71)">"OpenEMMA: Open-Source Multimodal Model for End-to-End Autonomous Driving"</b>
381+               <br>IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) Workshops, 2024. <a href="https://arxiv.org/abs/2412.15208">[Paper]</a> <a href="https://github.com/taco-group/OpenEMMA">[Code]</a>
382+             </li>
383+
380384             </ul>
381385           </div>
382386         </div>
391395           <div class="trend-contents">
392396             <ul>
393397
398+
394399             <li>C.-J. Chen, R. Xu, W. Shao, J. Zhang, Z. Tu
395400               <br> <b style="color:rgb(71, 71, 71)">"OpenCDA-∞: A Closed-loop Benchmarking Platform for End-to-end Evaluation of Cooperative Perception"</b>
396401               <br>NeurIPS Dataset and Benchmark Track (NeurIPS), 2024.
@@ -420,6 +425,18 @@ <h3>
420425               <a href="https://openaccess.thecvf.com/content/CVPR2024/html/Li_Light_the_Night_A_Multi-Condition_Diffusion_Framework_for_Unpaired_Low-Light_CVPR_2024_paper.html">[Paper]</a> <a href="https://github.com/jinlong17/LightDiff">[Code]</a>
421426             </li>
422427
428+
429+             <li>C. He, Q. Zheng, R. Zhu, X. Zeng, Y. Fan, Z. Tu,
430+
431+               <br> <b style="color:rgb(71, 71, 71)">"COVER: A comprehensive video quality evaluator"</b>
432+               <br>IEEE/CVF Computer Vision and Pattern Recognition (CVPR) Workshops, 2024.
433+               <!-- <a href="https://mobility-lab.seas.ucla.edu/v2v4real/">[Project]</a>-->
434+               <a href="https://openaccess.thecvf.com/content/CVPR2024W/AI4Streaming/html/He_COVER_A_Comprehensive_Video_Quality_Evaluator_CVPRW_2024_paper.html">[Paper]</a> <a href="https://github.com/vztu/COVER">[Code]</a>
435+               <br> <b><font color="red">🏆 1st place solution for <a href="https://openaccess.thecvf.com/content/CVPR2024W/AI4Streaming/html/Conde_AIS_2024_Challenge_on_Video_Quality_Assessment_of_User-Generated_Content_CVPRW_2024_paper.html">AIS 2024 UGC Video Quality Assessment Challenge</a></font></b>
436+               <br> <b><font color="red">3rd place solution for <a href="https://arxiv.org/abs/2408.11982">AIM 2024 Challenge on Compressed Video Quality Assessment</a></font></b>
437+
438+             </li>
439+
423440             </ul>
424441           </div>
425442         </div>
@@ -512,6 +529,17 @@ <h3>
512529               <a href="https://arxiv.org/abs/2201.02973">[Paper]</a> <a href="https://github.com/google-research/maxim">[Code]</a>
513530               <br> <b><font color="red">Best paper nomination award (0.4% of 8161 submissions)</font></b>
514531             </li>
532+
533+
534+             <li>X. Yu, Z. Tu, Z. Ying, A.C. Bovik, N. Birkbeck, Y. Wang, B. Adsumilli
535+
536+               <br> <b style="color:rgb(71, 71, 71)">"Subjective quality assessment of user-generated content gaming videos"</b>
537+               <br>IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) Workshops, 2022.
538+               <!-- <a href="https://mobility-lab.seas.ucla.edu/v2v4real/">[Project]</a>-->
539+               <a href="https://openaccess.thecvf.com/content/WACV2022W/VAQ/html/Yu_Subjective_Quality_Assessment_of_User-Generated_Content_Gaming_Videos_WACVW_2022_paper.html">[Paper]</a> <a href="https://live.ece.utexas.edu/research/LIVE-YT-Gaming/index.html">[Dataset]</a>
540+             </li>
541+
542+
515543             </ul>
516544           </div>
517545         </div>
555583             </li>
556584
557585
586+
558587             </ul>
559588           </div>
560589
@@ -594,45 +623,27 @@ <h3>
594623
595624
596625
597-         <div class="section-title" style="margin-bottom: 20px">
598-           <h2>Workshop Paper</h2>
599-         </div>
600- <!-- <div class="section-title" style="margin-bottom: 10px">-->
601- <!-- <h3>-->
602- <!-- <font size="4"> 2024</font>-->
603- <!-- </h3>-->
626+ <!-- <div class="section-title" style="margin-bottom: 20px">-->
627+ <!-- <h2>Workshop Paper</h2>-->
604628<!-- </div>-->
605-         <div class="trend-entry d-flex">
606-           <div class="trend-contents">
607-             <ul>
608-
609-             <li>C. He, Q. Zheng, R. Zhu, X. Zeng, Y. Fan, Z. Tu,
610-
611-               <br> <b style="color:rgb(71, 71, 71)">"COVER: A comprehensive video quality evaluator"</b>
612-               <br>IEEE/CVF Computer Vision and Pattern Recognition (CVPR) Workshops, 2024.
613-               <!-- <a href="https://mobility-lab.seas.ucla.edu/v2v4real/">[Project]</a>-->
614-               <a href="https://openaccess.thecvf.com/content/CVPR2024W/AI4Streaming/html/He_COVER_A_Comprehensive_Video_Quality_Evaluator_CVPRW_2024_paper.html">[Paper]</a> <a href="https://github.com/vztu/COVER">[Code]</a>
615-               <br> <b><font color="red">🏆 1st place solution for <a href="https://openaccess.thecvf.com/content/CVPR2024W/AI4Streaming/html/Conde_AIS_2024_Challenge_on_Video_Quality_Assessment_of_User-Generated_Content_CVPRW_2024_paper.html">AIS 2024 UGC Video Quality Assessment Challenge</a></font></b>
616-               <br> <b><font color="red">3rd place solution for <a href="https://arxiv.org/abs/2408.11982">AIM 2024 Challenge on Compressed Video Quality Assessment</a></font></b>
629+ <!--<!– <div class="section-title" style="margin-bottom: 10px">–>-->
630+ <!--<!– <h3>–>-->
631+ <!--<!– <font size="4"> 2024</font>–>-->
632+ <!--<!– </h3>–>-->
633+ <!--<!– </div>–>-->
634+ <!-- <div class="trend-entry d-flex">-->
635+ <!-- <div class="trend-contents">-->
636+ <!-- <ul>-->
617637
618-             </li>
619638
620639
621640
622-             <li>X. Yu, Z. Tu, Z. Ying, A.C. Bovik, N. Birkbeck, Y. Wang, B. Adsumilli
623-
624-               <br> <b style="color:rgb(71, 71, 71)">"Subjective quality assessment of user-generated content gaming videos"</b>
625-               <br>IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) Workshops, 2022.
626-               <!-- <a href="https://mobility-lab.seas.ucla.edu/v2v4real/">[Project]</a>-->
627-               <a href="https://openaccess.thecvf.com/content/WACV2022W/VAQ/html/Yu_Subjective_Quality_Assessment_of_User-Generated_Content_Gaming_Videos_WACVW_2022_paper.html">[Paper]</a> <a href="https://live.ece.utexas.edu/research/LIVE-YT-Gaming/index.html">[Dataset]</a>
628-             </li>
629-
630641
631642
632643
633644
634-             </ul>
635-           </div>
645+             <!-- </ul>-->
646+             <!-- </div>-->
636647
637648         </div>
638649