@article{M3BAC5537,
  title    = "The Impact of Dataset on Offline Reinforcement Learning of Multiple UAVs for Flying Ad-hoc Network Formation",
  journal  = "The Journal of Korean Institute of Communications and Information Sciences",
  year     = "2024",
  issn     = "1226-4717",
  doi      = "10.7840/kics.2024.49.7.1002",
  author   = "Dongsu Lee and Minhae Kwon",
  keywords = "Artificial Intelligence, Multi-agent Reinforcement Learning, Offline Reinforcement Learning, Ad-hoc Network",
  abstract = "A Flying Ad-hoc Network (FANET) is a network of airborne mobile nodes that can communicate independently and can serve as a fallback network during a crisis, such as a disaster or war, when existing infrastructure is damaged. Several Unmanned Aerial Vehicles (UAVs) with communication and routing capabilities can travel to areas where communication is unavailable and establish a FANET. In this study, we consider a scenario in which multiple UAVs trained through offline reinforcement learning establish a FANET without centralized control. We conducted experiments comparing the network-formation performance of the multi-agent system across datasets and offline reinforcement learning algorithms, and analyzed how learning behavior depends on the features of the datasets and algorithms."
}