@article{MFB02ACC9,
  title    = "TBNs-Based Bidirectional Fusion Method For Real-Time Multi-Modal Segmentation",
  journal  = "The Journal of Korean Institute of Communications and Information Sciences",
  year     = "2025",
  issn     = "1226-4717",
  doi      = "10.7840/kics.2025.50.1.22",
  author   = "Yunsik Shin and Yongho Son and Junghee Park and Chaehyun Lee and Yanggon Kim and Jun Won Choi",
  keywords = "2D Semantic Segmentation, Multi-modal Fusion, Depth Map Fusion",
  abstract = "In autonomous driving and robotics, image segmentation, which perceives the shape of the surrounding environment and of objects, has become essential. Camera-based segmentation exploits rich color information but is limited in its use of spatial information about the environment. To address this, prior work has fused additional sensor data, such as depth images or LiDAR, to achieve higher reliability. However, most studies rely on complex fusion techniques to reach high performance, which makes them infeasible to deploy in resource-constrained real-time environments. This study proposes an efficient fusion technique that leverages the complementary characteristics of camera and depth images to enable real-time operation. The proposed bidirectional fusion model improves performance by 1.27 mIoU over the camera-only model while running at 16.32 FPS, making real-time operation possible."
}
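The abstract describes bidirectional fusion between a camera branch and a depth branch, where each modality's features refine the other's. The entry does not specify the TBN architecture, so the following is only a minimal PyTorch sketch of one generic way such a lightweight bidirectional exchange could look; the module and parameter names (BidirectionalFusionBlock, depth_to_rgb, rgb_to_depth) are hypothetical, not taken from the paper.

import torch
import torch.nn as nn

class BidirectionalFusionBlock(nn.Module):
    """Hypothetical bidirectional fusion: each branch is gated by a
    lightweight projection of the other branch's features. 1x1 convs
    keep the cross-branch exchange cheap, in line with the paper's
    stated real-time goal."""
    def __init__(self, channels: int):
        super().__init__()
        self.depth_to_rgb = nn.Conv2d(channels, channels, kernel_size=1)
        self.rgb_to_depth = nn.Conv2d(channels, channels, kernel_size=1)
        self.gate = nn.Sigmoid()

    def forward(self, rgb_feat: torch.Tensor, depth_feat: torch.Tensor):
        # Gate each stream with information from the other stream,
        # keeping a residual path so neither modality is overwritten.
        rgb_out = rgb_feat + rgb_feat * self.gate(self.depth_to_rgb(depth_feat))
        depth_out = depth_feat + depth_feat * self.gate(self.rgb_to_depth(rgb_feat))
        return rgb_out, depth_out

if __name__ == "__main__":
    block = BidirectionalFusionBlock(channels=64)
    rgb = torch.randn(1, 64, 128, 256)    # camera feature map
    depth = torch.randn(1, 64, 128, 256)  # depth feature map
    rgb_f, depth_f = block(rgb, depth)
    print(rgb_f.shape, depth_f.shape)     # both: torch.Size([1, 64, 128, 256])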