@article{M70795D0D,
  author   = {Suh, Jun-Cheol and Lee, Jung-Been and Kim, Jeong-Dong and Lee, Taek},
  title    = {Metamorphic Test-Based Robustness Evaluation of Object Detection Models in Safety Critical Systems},
  journal  = {The Journal of Korean Institute of Communications and Information Sciences},
  year     = {2025},
  issn     = {1226-4717},
  doi      = {10.7840/kics.2025.50.7.995},
  keywords = {object detection, metamorphic test, robustness evaluation, safety critical system, adversarial attacks},
  abstract = {In recent years, artificial intelligence (AI) technology has made rapid progress and is widely used in various fields such as natural language processing and computer vision. However, the black-box nature of AI models raises reliability concerns, particularly in safety-critical systems (ScSs) such as autonomous vehicles (AVs) and medical systems, where understanding model behavior is crucial. This study proposes a robustness evaluation method for AV object detection models (ODMs) by integrating metamorphic testing and adversarial attack techniques. Using a YOLOv5-based ODM, the model was tested to detect essential objects in traffic safety scenarios, including pedestrians, traffic lights, and road signs. Various image augmentations simulating physical deformations (e.g., rotation, tilt) and weather conditions (e.g., snow, rain) were applied to measure model robustness. Results showed that model performance varied significantly with specific augmentations, revealing vulnerabilities. Notably, the pedestrian detection model, despite its high accuracy during training, showed only 35.8% robustness against augmented images. This highlights that even high-performing models in general conditions may fail under diverse environmental factors. These findings underscore the importance of rigorous robustness evaluation for AI models in ScSs to ensure reliability and safety in real-world applications like AVs.},
}