Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If a paper you are looking for is missing from the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2024
Sukthanker, Rhea Sanjay; Zela, Arber; Staffler, Benedikt; Dooley, Samuel; Grabocka, Josif; Hutter, Frank
Multi-objective Differentiable Neural Architecture Search Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-18213,
title = {Multi-objective Differentiable Neural Architecture Search},
author = {Rhea Sanjay Sukthanker and Arber Zela and Benedikt Staffler and Samuel Dooley and Josif Grabocka and Frank Hutter},
url = {https://doi.org/10.48550/arXiv.2402.18213},
doi = {10.48550/arXiv.2402.18213},
institution = {arXiv},
eprint = {2402.18213},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.18213},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gao, Peng; Liu, Xiao; Wang, Yu; Yuan, Ru-Yue
Searching a Lightweight Network Architecture for Thermal Infrared Pedestrian Tracking Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-16570,
title = {Searching a Lightweight Network Architecture for Thermal Infrared Pedestrian Tracking},
author = {Peng Gao and Xiao Liu and Yu Wang and Ru-Yue Yuan},
url = {https://doi.org/10.48550/arXiv.2402.16570},
doi = {10.48550/arXiv.2402.16570},
institution = {arXiv},
eprint = {2402.16570},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.16570},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Rui; Zhang, Peng-Yun; Gao, Mei-Rong; Ma, Jian-Zhe; Pan, Li-Hu
Low-cost architecture performance evaluation strategy based on pixel difference degree contrast measurement Journal Article
In: Applied Soft Computing, vol. 155, pp. 111440, 2024, ISSN: 1568-4946.
@article{ZHANG2024111440,
title = {Low-cost architecture performance evaluation strategy based on pixel difference degree contrast measurement},
author = {Rui Zhang and Peng-Yun Zhang and Mei-Rong Gao and Jian-Zhe Ma and Li-Hu Pan},
url = {https://www.sciencedirect.com/science/article/pii/S156849462400214X},
doi = {10.1016/j.asoc.2024.111440},
issn = {1568-4946},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Applied Soft Computing},
volume = {155},
pages = {111440},
abstract = {The time and effort required to manually design deep neural architectures is extremely high, which has led to the development of neural architecture search technology as an automatic architecture design method. However, the neural architecture search convergence process is slow and expensive, and the process requires training a large number of candidate architectures to get the final result. If the final accuracy of an architecture can be predicted from its initial state, this problem can be greatly alleviated. Therefore, this paper proposes a low-cost architecture performance evaluation strategy based on pixel difference degree contrast measurement, which takes 1) the difference matrix value between the feature map generated in the untrained architecture and the original image, and 2) the predicted accuracy of the neural network as evaluation indices. A new multi-index weight comprehensive measurement strategy was introduced to comprehensively score the multi-index, the real architecture performance can be approximately represented by score, which greatly reduces the cost of architecture evaluation. The experimental show that the proposed scoring strategy is highly correlated with real architecture accuracy. In the practical engineering application research, this strategy can search a high-performance architecture with an accuracy of 96.2% within 343.3 s, which proves that the proposed strategy can significantly improve the search efficiency in practical applications, reduce the subjectivity of artificial architecture design, and promote the application of practical time-consuming projects.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Risso, Matteo; Daghero, Francesco; Motetti, Beatrice Alessandra; Pagliari, Daniele Jahier; Macii, Enrico; Poncino, Massimo; Burrello, Alessio
Optimized Deployment of Deep Neural Networks for Visual Pose Estimation on Nano-drones Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-15273,
title = {Optimized Deployment of Deep Neural Networks for Visual Pose Estimation on Nano-drones},
author = {Matteo Risso and Francesco Daghero and Beatrice Alessandra Motetti and Daniele Jahier Pagliari and Enrico Macii and Massimo Poncino and Alessio Burrello},
url = {https://doi.org/10.48550/arXiv.2402.15273},
doi = {10.48550/arXiv.2402.15273},
institution = {arXiv},
eprint = {2402.15273},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.15273},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Garg, Manav S.
Automated Machine Learning: Evaluation without Training Journal Article
In: 2024.
@article{Garg_2024,
title = {Automated Machine Learning: Evaluation without Training},
author = {Manav S. Garg},
url = {https://doi.org/10.36227/techrxiv.170595826.64565617/v1},
doi = {10.36227/techrxiv.170595826.64565617/v1},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Huang, Hongtao; Chang, Xiaojun; Hu, Wen; Yao, Lina
MatchNAS: Optimizing Edge AI in Sparse-Label Data Contexts via Automating Deep Neural Network Porting for Mobile Deployment Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-13525,
title = {MatchNAS: Optimizing Edge AI in Sparse-Label Data Contexts via Automating Deep Neural Network Porting for Mobile Deployment},
author = {Hongtao Huang and Xiaojun Chang and Wen Hu and Lina Yao},
url = {https://doi.org/10.48550/arXiv.2402.13525},
doi = {10.48550/arXiv.2402.13525},
institution = {arXiv},
eprint = {2402.13525},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.13525},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Cao, Tue Minh; Tran, Nhat Hong; Pham, Hieu H.; Nguyen, Hung Thanh; Nguyen, Le P.
MSTAR: Multi-Scale Backbone Architecture Search for Timeseries Classification Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-13822,
title = {MSTAR: Multi-Scale Backbone Architecture Search for Timeseries Classification},
author = {Tue Minh Cao and Nhat Hong Tran and Hieu H. Pham and Hung Thanh Nguyen and Le P. Nguyen},
url = {https://doi.org/10.48550/arXiv.2402.13822},
doi = {10.48550/arXiv.2402.13822},
institution = {arXiv},
eprint = {2402.13822},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.13822},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Putra, Rachmad Vidya Wicaksana; Shafique, Muhammad
SpikeNAS: A Fast Memory-Aware Neural Architecture Search Framework for Spiking Neural Network Systems Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-11322b,
title = {SpikeNAS: A Fast Memory-Aware Neural Architecture Search Framework for Spiking Neural Network Systems},
author = {Rachmad Vidya Wicaksana Putra and Muhammad Shafique},
url = {https://doi.org/10.48550/arXiv.2402.11322},
doi = {10.48550/arXiv.2402.11322},
institution = {arXiv},
eprint = {2402.11322},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.11322},
internal-note = {Apparent duplicate of entry DBLP:journals/corr/abs-2402-11322 (same title, authors, and arXiv id) -- consider removing one},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Bouzidi, Halima; Niar, Smaïl; Ouarnoughi, Hamza; Talbi, El-Ghazali
SONATA: Self-adaptive Evolutionary Framework for Hardware-aware Neural Architecture Search Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-13204,
title = {SONATA: Self-adaptive Evolutionary Framework for Hardware-aware Neural Architecture Search},
author = {Halima Bouzidi and Smaïl Niar and Hamza Ouarnoughi and El-Ghazali Talbi},
url = {https://doi.org/10.48550/arXiv.2402.13204},
doi = {10.48550/arXiv.2402.13204},
institution = {arXiv},
eprint = {2402.13204},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.13204},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Kundu, Souvik; Sarah, Anthony; Joshi, Vinay; Omer, Om Ji; Subramoney, Sreenivas
CiMNet: Towards Joint Optimization for DNN Architecture and Configuration for Compute-In-Memory Hardware Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-11780,
title = {CiMNet: Towards Joint Optimization for DNN Architecture and Configuration for Compute-In-Memory Hardware},
author = {Souvik Kundu and Anthony Sarah and Vinay Joshi and Om Ji Omer and Sreenivas Subramoney},
url = {https://doi.org/10.48550/arXiv.2402.11780},
doi = {10.48550/arXiv.2402.11780},
institution = {arXiv},
eprint = {2402.11780},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.11780},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Putra, Rachmad Vidya Wicaksana; Shafique, Muhammad
SpikeNAS: A Fast Memory-Aware Neural Architecture Search Framework for Spiking Neural Network Systems Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-11322,
title = {SpikeNAS: A Fast Memory-Aware Neural Architecture Search Framework for Spiking Neural Network Systems},
author = {Rachmad Vidya Wicaksana Putra and Muhammad Shafique},
url = {https://doi.org/10.48550/arXiv.2402.11322},
doi = {10.48550/arXiv.2402.11322},
institution = {arXiv},
eprint = {2402.11322},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.11322},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Junjue; Zhong, Yanfei; Ma, Ailong; Zheng, Zhuo; Wan, Yuting; Zhang, Liangpei
LoveNAS: Towards multi-scene land-cover mapping via hierarchical searching adaptive network Journal Article
In: ISPRS Journal of Photogrammetry and Remote Sensing, vol. 209, pp. 265-278, 2024, ISSN: 0924-2716.
@article{WANG2024265,
title = {LoveNAS: Towards multi-scene land-cover mapping via hierarchical searching adaptive network},
author = {Junjue Wang and Yanfei Zhong and Ailong Ma and Zhuo Zheng and Yuting Wan and Liangpei Zhang},
url = {https://www.sciencedirect.com/science/article/pii/S0924271624000200},
doi = {10.1016/j.isprsjprs.2024.01.011},
issn = {0924-2716},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
volume = {209},
pages = {265--278},
abstract = {Land-cover information reflects basic Earth’s surface environments and is critical to human settlements. As a well-established deep learning architecture, the fully convolutional network has achieved impressive progress in various land-cover mapping tasks. However, most research has focused on designing powerful encoders, ignoring the exploration of decoders. The existing handcrafted decoders are relatively simple and lack flexibility, limiting the generalizability for complex remote sensing scenes. In this paper, we propose a Land-cOVEr mapping Neural Architecture Search framework (LoveNAS) to automatically find efficient decoders that are compatible with the encoders and tasks. Specifically, LoveNAS introduces a hierarchical dense search space, including densely connected layer-level and multi-scale operation-level search spaces. The search spaces contain independent connection and operation fusion strategies, facilitating sufficient interaction of multi-scale features. After searching based on large-scale datasets, a series of pre-trained encoders and adaptive decoders are obtained. These can be smoothly applied to multi-scene tasks using weight-transfer network training. Experimental results on normal and disaster scenes shows that LoveNAS outperforms 16 handcrafted architectures and NAS methods. Some searched structures coincide with the existing advanced artificial designs, revealing the potential value of LoveNAS in network design and guidance. Group’s website: http://rsidea.whu.edu.cn/resource_sharing.htm. GitHub page: https://github.com/Junjue-Wang/LoveNAS.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Xin; Tian, Jie; Duan, Peiyong; Yu, Qian; Wang, Gaige; Wang, Yingjie
GrMoNAS: A granularity-based multi-objective NAS framework for efficient medical diagnosis Journal Article
In: Computers in Biology and Medicine, vol. 171, pp. 108118, 2024, ISSN: 0010-4825.
@article{LIU2024108118,
title = {GrMoNAS: A granularity-based multi-objective NAS framework for efficient medical diagnosis},
author = {Xin Liu and Jie Tian and Peiyong Duan and Qian Yu and Gaige Wang and Yingjie Wang},
url = {https://www.sciencedirect.com/science/article/pii/S0010482524002026},
doi = {10.1016/j.compbiomed.2024.108118},
issn = {0010-4825},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Computers in Biology and Medicine},
volume = {171},
pages = {108118},
abstract = {Neural Architecture Search (NAS) has been widely applied to automate medical image diagnostics. However, traditional NAS methods require significant computational resources and time for performance evaluation. To address this, we introduce the GrMoNAS framework, designed to balance diagnostic accuracy and efficiency using proxy datasets for granularity transformation and multi-objective optimization algorithms. The approach initiates with a coarse granularity phase, wherein diverse candidate neural architectures undergo evaluation utilizing a reduced proxy dataset. This initial phase facilitates the swift and effective identification of architectures exhibiting promise. Subsequently, in the fine granularity phase, a comprehensive validation and optimization process is undertaken for these identified architectures. Concurrently, employing multi-objective optimization and Pareto frontier sorting aims to enhance both accuracy and computational efficiency simultaneously. Importantly, the GrMoNAS framework is particularly suitable for hospitals with limited computational resources. We evaluated GrMoNAS in a range of medical scenarios, such as COVID-19, Skin cancer, Lung, Colon, and Acute Lymphoblastic Leukemia diseases, comparing it against traditional models like VGG16, VGG19, and recent NAS approaches including GA-CNN, EBNAS, NEXception, and CovNAS. The results show that GrMoNAS achieves comparable or superior diagnostic precision, significantly enhancing diagnostic efficiency. Moreover, GrMoNAS effectively avoids local optima, indicating its significant potential for precision medical diagnosis.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yan, Jiaqi; Liu, Qianhui; Zhang, Malu; Feng, Lang; Ma, De; Li, Haizhou; Pan, Gang
Efficient spiking neural network design via neural architecture search Journal Article
In: Neural Networks, vol. 173, pp. 106172, 2024, ISSN: 0893-6080.
@article{YAN2024106172,
title = {Efficient spiking neural network design via neural architecture search},
author = {Jiaqi Yan and Qianhui Liu and Malu Zhang and Lang Feng and De Ma and Haizhou Li and Gang Pan},
url = {https://www.sciencedirect.com/science/article/pii/S0893608024000960},
doi = {10.1016/j.neunet.2024.106172},
issn = {0893-6080},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Neural Networks},
volume = {173},
pages = {106172},
abstract = {Spiking neural networks (SNNs) are brain-inspired models that utilize discrete and sparse spikes to transmit information, thus having the property of energy efficiency. Recent advances in learning algorithms have greatly improved SNN performance due to the automation of feature engineering. While the choice of neural architecture plays a significant role in deep learning, the current SNN architectures are mainly designed manually, which is a time-consuming and error-prone process. In this paper, we propose a spiking neural architecture search (NAS) method that can automatically find efficient SNNs. To tackle the challenge of long search time faced by SNNs when utilizing NAS, the proposed NAS encodes candidate architectures in a branchless spiking supernet which significantly reduces the computation requirements in the search process. Considering that real-world tasks prefer efficient networks with optimal accuracy under a limited computational budget, we propose a Synaptic Operation (SynOps)-aware optimization to automatically find the computationally efficient subspace of the supernet. Experimental results show that, in less search time, our proposed NAS can find SNNs with higher accuracy and lower computational cost than state-of-the-art SNNs. We also conduct experiments to validate the search process and the trade-off between accuracy and computational cost.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Qin, Ruiyang; Hu, Yuting; Yan, Zheyu; Xiong, Jinjun; Abbasi, Ahmed; Shi, Yiyu
FL-NAS: Towards Fairness of NAS for Resource Constrained Devices via Large Language Models Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-06696,
title = {FL-NAS: Towards Fairness of NAS for Resource Constrained Devices via Large Language Models},
author = {Ruiyang Qin and Yuting Hu and Zheyu Yan and Jinjun Xiong and Ahmed Abbasi and Yiyu Shi},
url = {https://doi.org/10.48550/arXiv.2402.06696},
doi = {10.48550/arXiv.2402.06696},
institution = {arXiv},
eprint = {2402.06696},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.06696},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Vellenga, Koen; Steinhauer, H. Joe; Karlsson, Alexander; Falkman, Göran; Rhodin, Asli; Koppisetty, Ashok
Designing deep neural networks for driver intention recognition Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-05150,
title = {Designing deep neural networks for driver intention recognition},
author = {Koen Vellenga and H. Joe Steinhauer and Alexander Karlsson and Göran Falkman and Asli Rhodin and Ashok Koppisetty},
url = {https://doi.org/10.48550/arXiv.2402.05150},
doi = {10.48550/arXiv.2402.05150},
institution = {arXiv},
eprint = {2402.05150},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.05150},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hu, Liwei; Wang, Zidong; Li, Han; Wu, Peishu; Mao, Jingfeng; Zeng, Nianyin
ℓ-DARTS: Light-weight differentiable architecture search with robustness enhancement strategy Journal Article
In: Knowledge-Based Systems, vol. 288, pp. 111466, 2024, ISSN: 0950-7051.
@article{HU2024111466,
title = {ℓ-DARTS: Light-weight differentiable architecture search with robustness enhancement strategy},
author = {Liwei Hu and Zidong Wang and Han Li and Peishu Wu and Jingfeng Mao and Nianyin Zeng},
url = {https://www.sciencedirect.com/science/article/pii/S0950705124001011},
doi = {10.1016/j.knosys.2024.111466},
issn = {0950-7051},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Knowledge-Based Systems},
volume = {288},
pages = {111466},
abstract = {In this paper, a novel light-weight differentiable architecture search (ℓ-DARTS) model is proposed to address the challenge of balancing search efficiency and accuracy commonly faced in the neural architecture search (NAS) field. By reducing the model depth, the fast search on a simplified structure with less redundancy is facilitated by the proposed ℓ-DARTS as compared to the original DARTS. To bridge the discrepancy of semantic information among channels and mitigate potential accuracy degradation, a channel fusion compensation module is introduced. Furthermore, an enhanced regularization technique with a margin value is employed, which ensures thorough consideration of all candidate operations, thereby effectively reducing the preference for parameter-free operations during the search stage and consequently preventing performance collapse of the searched architecture. The proposed ℓ-DARTS is evaluated in various DARTS search spaces on three datasets, which achieves an accuracy of 97.54% ± 0.03 with 0.06 GPU-Days on CIFAR-10, and also demonstrates strong generalization on the target datasets CIFAR-100 and ImageNet with optimal accuracy, indicating its significant competitiveness against other leading DARTS variants. Moreover, the efficacy of the employed strategies is confirmed through extensive experiments, which promotes fair competition among candidate operations and favors the acquisition of a robust architecture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xiang, Mingxi; Ding, Rui; Liu, Haijun; Zhou, Xichuan
Latency-Constrained Neural Architecture Search Method for Efficient Model Deployment on RISC-V Devices Journal Article
In: Electronics, vol. 13, no. 4, 2024, ISSN: 2079-9292.
@article{electronics13040692,
  title     = {Latency-Constrained Neural Architecture Search Method for Efficient Model Deployment on RISC-V Devices},
  author    = {Mingxi Xiang and Rui Ding and Haijun Liu and Xichuan Zhou},
  url       = {https://www.mdpi.com/2079-9292/13/4/692},
  doi       = {10.3390/electronics13040692},
  issn      = {2079-9292},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-01-01},
  journal   = {Electronics},
  volume    = {13},
  number    = {4},
  abstract  = {The rapid development of the RISC-V instruction set architecture (ISA) has garnered significant attention in the realm of deep neural network applications. While hardware-aware neural architecture search (NAS) methods for ARM, X86, and GPUs have been extensively explored, research specifically targeting RISC-V remains limited. In light of this, we propose a latency-constrained NAS (LC-NAS) method specifically designed for RISC-V. This method enables efficient network searches without the requirement of network training. Concretely, in the training-free NAS framework, we introduce an RISC-V latency evaluation module that includes two implementations: a lookup table and a latency predictor based on a deep neural network. To obtain real latency data, we have designed a specialized data collection pipeline for RISC-V devices, which allows for precise end-to-end hardware latency measurements. We validate the effectiveness of our method in the NAS-Bench-201 search space. Experimental results demonstrate that our method can efficiently search for latency-constrained networks for RISC-V devices within seconds while maintaining high accuracy. Additionally, our method can easily integrate with existing training-free NAS approaches.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Cao, Chunhong; Yi, Hongbo; Xiang, Han; He, Pan; Hu, Jing; Xiao, Fen; Gao, Xieping
Accelerated Sparse-Coding-Inspired Feedback Neural Architecture Search for Hyperspectral Image Classification Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, vol. 62, pp. 1-14, 2024.
@article{10426762,
title = {Accelerated Sparse-Coding-Inspired Feedback Neural Architecture Search for Hyperspectral Image Classification},
author = {Chunhong Cao and Hongbo Yi and Han Xiang and Pan He and Jing Hu and Fen Xiao and Xieping Gao},
url = {https://ieeexplore.ieee.org/abstract/document/10426762},
doi = {10.1109/TGRS.2024.3363777},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
volume = {62},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Wu, Fan; Gao, Jinling; Hong, Lanqing; Wang, Xinbing; Zhou, Chenghu; Ye, Nanyang
G-NAS: Generalizable Neural Architecture Search for Single Domain Generalization Object Detection Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-04672,
title = {G-NAS: Generalizable Neural Architecture Search for Single Domain Generalization Object Detection},
author = {Fan Wu and Jinling Gao and Lanqing Hong and Xinbing Wang and Chenghu Zhou and Nanyang Ye},
url = {https://doi.org/10.48550/arXiv.2402.04672},
doi = {10.48550/arXiv.2402.04672},
institution = {arXiv},
eprint = {2402.04672},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.04672},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zeng, Junhua; Zhou, Guoxu; Li, Chao; Sun, Zhun; Zhao, Qibin
Discovering More Effective Tensor Network Structure Search Algorithms via Large Language Models (LLMs) Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-02456,
title = {Discovering More Effective Tensor Network Structure Search Algorithms via Large Language Models (LLMs)},
author = {Junhua Zeng and Guoxu Zhou and Chao Li and Zhun Sun and Qibin Zhao},
url = {https://doi.org/10.48550/arXiv.2402.02456},
doi = {10.48550/arXiv.2402.02456},
institution = {arXiv},
eprint = {2402.02456},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.02456},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Dong, Peijie; Li, Lujun; Pan, Xinglin; Wei, Zimian; Liu, Xiang; Wang, Qiang; Chu, Xiaowen
ParZC: Parametric Zero-Cost Proxies for Efficient NAS Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-02105,
title = {ParZC: Parametric Zero-Cost Proxies for Efficient NAS},
author = {Peijie Dong and Lujun Li and Xinglin Pan and Zimian Wei and Xiang Liu and Qiang Wang and Xiaowen Chu},
url = {https://doi.org/10.48550/arXiv.2402.02105},
doi = {10.48550/arXiv.2402.02105},
institution = {arXiv},
eprint = {2402.02105},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.02105},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Risso, Matteo; Xie, Chen; Daghero, Francesco; Burrello, Alessio; Mollaei, Seyedmorteza; Castellano, Marco; Macii, Enrico; Poncino, Massimo; Pagliari, Daniele Jahier
HW-SW Optimization of DNNs for Privacy-preserving People Counting on Low-resolution Infrared Arrays Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-01226,
title = {HW-SW Optimization of DNNs for Privacy-preserving People Counting on Low-resolution Infrared Arrays},
author = {Matteo Risso and Chen Xie and Francesco Daghero and Alessio Burrello and Seyedmorteza Mollaei and Marco Castellano and Enrico Macii and Massimo Poncino and Daniele Jahier Pagliari},
url = {https://doi.org/10.48550/arXiv.2402.01226},
doi = {10.48550/arXiv.2402.01226},
institution = {arXiv},
eprint = {2402.01226},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.01226},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zouambi, Meyssa
Optimizing Deep Learning: Navigating the Field of Neural Architecture Search from Theory to Practice PhD Thesis
2024.
@phdthesis{Zouambi-phd24a,
  title     = {Optimizing Deep Learning: Navigating the Field of Neural Architecture Search from Theory to Practice},
  author    = {Meyssa Zouambi},
  url       = {https://hal.science/tel-04437745/document},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Zhang, Baochang; Wang, Tiancheng; Xu, Sheng; Doermann, David
Binary Neural Architecture Search Book Chapter
In: Neural Networks with Model Compression, pp. 49–99, Springer Nature Singapore, Singapore, 2024, ISBN: 978-981-99-5068-3.
@inbook{Zhang2024,
title = {Binary Neural Architecture Search},
author = {Baochang Zhang and Tiancheng Wang and Sheng Xu and David Doermann},
url = {https://doi.org/10.1007/978-981-99-5068-3_3},
doi = {10.1007/978-981-99-5068-3_3},
isbn = {978-981-99-5068-3},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {Neural Networks with Model Compression},
pages = {49--99},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {Deep convolutional neural networks (DCNNs) have achieved state-of-the-art performance in various computer vision tasks, including image classification, instance segmentation, and object detection. The success of DCNNs is attributed to effective architecture design. Neural architecture search (NAS) is an emerging approach that automates the process of designing neural architectures, replacing manual design.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Tempel, Felix; Strümke, Inga; Ihlen, Espen Alexander Fürst
AutoGCN - Towards Generic Human Activity Recognition with Neural Architecture Search Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-01313,
title = {AutoGCN - Towards Generic Human Activity Recognition with Neural Architecture Search},
author = {Felix Tempel and Inga Strümke and Espen Alexander Fürst Ihlen},
url = {https://doi.org/10.48550/arXiv.2402.01313},
doi = {10.48550/arXiv.2402.01313},
institution = {arXiv},
eprint = {2402.01313},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.01313},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Sheng; Wang, Maolin; Zhao, Yao; Zhuang, Chenyi; Gu, Jinjie; Guo, Ruocheng; Zhao, Xiangyu; Zhang, Zijian; Yin, Hongzhi
EASRec: Elastic Architecture Search for Efficient Long-term Sequential Recommender Systems Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2402-00390,
title = {EASRec: Elastic Architecture Search for Efficient Long-term Sequential Recommender Systems},
author = {Sheng Zhang and Maolin Wang and Yao Zhao and Chenyi Zhuang and Jinjie Gu and Ruocheng Guo and Xiangyu Zhao and Zijian Zhang and Hongzhi Yin},
url = {https://doi.org/10.48550/arXiv.2402.00390},
doi = {10.48550/arXiv.2402.00390},
institution = {arXiv},
eprint = {2402.00390},
archiveprefix = {arXiv},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2402.00390},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Pižurica, Nikola; Pavlović, Kosta; Kovačević, Slavko; Jovančević, Igor; Prado, Miguel
Generic neural architecture search toolkit for efficient and real-world deployment of visual inspection convolutional neural networks in industry Journal Article
In: Journal of Electronic Imaging, vol. 33, no. 3, pp. 031203, 2024.
@article{10.1117/1.JEI.33.3.031203,
  title     = {Generic neural architecture search toolkit for efficient and real-world deployment of visual inspection convolutional neural networks in industry},
  author    = {Nikola Pižurica and Kosta Pavlović and Slavko Kovačević and Igor Jovančević and Miguel Prado},
  url       = {https://doi.org/10.1117/1.JEI.33.3.031203},
  doi       = {10.1117/1.JEI.33.3.031203},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-01-01},
  journal   = {Journal of Electronic Imaging},
  volume    = {33},
  number    = {3},
  pages     = {031203},
  publisher = {SPIE},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Sheng, Chunyin; Gao, Xiang; Hu, Xiaopeng; Wang, Fan
Differentiable Neural Architecture Search Based on Efficient Architecture for Lightweight Image Super-Resolution Proceedings Article
In: Rudinac, Stevan; Hanjalic, Alan; Liem, Cynthia; Worring, Marcel; Jónsson, Björn Þór; Liu, Bei; Yamakata, Yoko (Ed.): MultiMedia Modeling, pp. 169–183, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-53311-2.
@inproceedings{10.1007/978-3-031-53311-2_13b,
title = {Differentiable Neural Architecture Search Based on Efficient Architecture for Lightweight Image Super-Resolution},
author = {Chunyin Sheng and Xiang Gao and Xiaopeng Hu and Fan Wang},
editor = {Stevan Rudinac and Alan Hanjalic and Cynthia Liem and Marcel Worring and Björn Þór Jónsson and Bei Liu and Yoko Yamakata},
url = {https://link.springer.com/chapter/10.1007/978-3-031-53311-2_13},
doi = {10.1007/978-3-031-53311-2_13},
isbn = {978-3-031-53311-2},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {MultiMedia Modeling},
pages = {169--183},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {With the advancement of deep neural networks, image Super-Resolution (SR) has witnessed remarkable improvements in performance. However, the increasing number of parameters and computational complexity has posed challenges for the practical deployment of SR models. To address these challenges, we propose a novel approach called Differentiable Neural Architecture Search (NAS) based on Efficient Architecture for lightweight image Super-Resolution, referred to as DNAS-EASR. In DNAS-EASR, we employ the information distillation mechanism (IDM) at the cell-level space to search for key operations. Additionally, we search for attention modules at the cell-level space to determine the most suitable attention module for our architecture. Furthermore, we adopt a hierarchical architecture as our backbone network to enable multi-scale information processing and fusion. Extensive experiments conducted on benchmark datasets demonstrate that DNAS-EASR is lightweight, efficient and capable of achieving comparable performance to other lightweight methods.},
internal-note = {Apparent duplicate of entry 10.1007/978-3-031-53311-2_13 (same title, authors, and DOI) -- consider removing one},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sheng, Chunyin; Gao, Xiang; Hu, Xiaopeng; Wang, Fan
Differentiable Neural Architecture Search Based on Efficient Architecture for Lightweight Image Super-Resolution Proceedings Article
In: Rudinac, Stevan; Hanjalic, Alan; Liem, Cynthia; Worring, Marcel; Jónsson, Björn Þór; Liu, Bei; Yamakata, Yoko (Ed.): MultiMedia Modeling, pp. 169–183, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-53311-2.
@inproceedings{10.1007/978-3-031-53311-2_13,
title = {Differentiable Neural Architecture Search Based on Efficient Architecture for Lightweight Image Super-Resolution},
author = {Chunyin Sheng and Xiang Gao and Xiaopeng Hu and Fan Wang},
editor = {Stevan Rudinac and Alan Hanjalic and Cynthia Liem and Marcel Worring and Björn Þór Jónsson and Bei Liu and Yoko Yamakata},
url = {https://link.springer.com/chapter/10.1007/978-3-031-53311-2_13},
isbn = {978-3-031-53311-2},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {MultiMedia Modeling},
pages = {169--183},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {With the advancement of deep neural networks, image Super-Resolution (SR) has witnessed remarkable improvements in performance. However, the increasing number of parameters and computational complexity has posed challenges for the practical deployment of SR models. To address these challenges, we propose a novel approach called Differentiable Neural Architecture Search (NAS) based on Efficient Architecture for lightweight image Super-Resolution, referred to as DNAS-EASR. In DNAS-EASR, we employ the information distillation mechanism (IDM) at the cell-level space to search for key operations. Additionally, we search for attention modules at the cell-level space to determine the most suitable attention module for our architecture. Furthermore, we adopt a hierarchical architecture as our backbone network to enable multi-scale information processing and fusion. Extensive experiments conducted on benchmark datasets demonstrate that DNAS-EASR is lightweight, efficient and capable of achieving comparable performance to other lightweight methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Shuaishuai; Sun, Yixiang; Sha, Yetong; Yang, Guangyu; Cheng, Dongzhou; Zhang, Lei; Wu, Hao; Song, Aiguo
Robust Human Activity Recognition via Wearable Sensors Using Dynamic Gaussian Kernel Learning Journal Article
In: IEEE Sensors Journal, pp. 1-1, 2024.
@article{10413952,
title = {Robust Human Activity Recognition via Wearable Sensors Using Dynamic Gaussian Kernel Learning},
author = {Shuaishuai Wang and Yixiang Sun and Yetong Sha and Guangyu Yang and Dongzhou Cheng and Lei Zhang and Hao Wu and Aiguo Song},
url = {https://ieeexplore.ieee.org/abstract/document/10413952},
doi = {10.1109/JSEN.2024.3355704},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Sensors Journal},
pages = {1-1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gao, Zhaoqi; Wang, Kezheng; Wang, Zhiguo; Gao, Jinghuai
Optimizing Seismic Facies Classification Through Differentiable Network Architecture Search Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, vol. 62, pp. 1-12, 2024.
@article{10413525,
title = {Optimizing Seismic Facies Classification Through Differentiable Network Architecture Search},
author = {Zhaoqi Gao and Kezheng Wang and Zhiguo Wang and Jinghuai Gao},
url = {https://ieeexplore.ieee.org/abstract/document/10413525},
doi = {10.1109/TGRS.2024.3357929},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
volume = {62},
pages = {1--12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lyu, Bo; Yang, Yin; Cao, Yuting; Wang, Pengcheng; Zhu, Jian; Chang, Jingfei; Wen, Shiping
Efficient multi-objective neural architecture search framework via policy gradient algorithm Journal Article
In: Information Sciences, vol. 661, pp. 120186, 2024, ISSN: 0020-0255.
@article{LYU2024120186,
title = {Efficient multi-objective neural architecture search framework via policy gradient algorithm},
author = {Bo Lyu and Yin Yang and Yuting Cao and Pengcheng Wang and Jian Zhu and Jingfei Chang and Shiping Wen},
url = {https://www.sciencedirect.com/science/article/pii/S0020025524000999},
doi = {10.1016/j.ins.2024.120186},
issn = {0020-0255},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Information Sciences},
volume = {661},
pages = {120186},
abstract = {Differentiable architecture search plays a prominent role in Neural Architecture Search (NAS) and exhibits preferable efficiency than traditional heuristic NAS methods, including those based on evolutionary algorithms (EA) and reinforcement learning (RL). However, differentiable NAS methods encounter challenges when dealing with non-differentiable objectives like energy efficiency, resource constraints, and other non-differentiable metrics, especially under multi-objective search scenarios. While the multi-objective NAS research addresses these challenges, the individual training required for each candidate architecture demands significant computational resources. To bridge this gap, this work combines the efficiency of the differentiable NAS with metrics compatibility in multi-objective NAS. The architectures are discretely sampled by the architecture parameter α within the differentiable NAS framework, and α are directly optimised by the policy gradient algorithm. This approach eliminates the need for a sampling controller to be learned and enables the encompassment of non-differentiable metrics. We provide an efficient NAS framework that can be readily customized to address real-world multi-objective NAS (MNAS) scenarios, encompassing factors such as resource limitations and platform specialization. Notably, compared with other multi-objective NAS methods, our NAS framework effectively decreases the computational burden (accounting for just 1/6 of the NSGA-Net). This search framework is also compatible with the other efficiency and performance improvement strategies under the differentiable NAS framework.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lu, Yao; Rodriguez, Hiram Rayo Torres; Vogel, Sebastian; Waterlaat, Nick; Jancura, Pavol
Scaling Up Quantization-Aware Neural Architecture Search for Efficient Deep Learning on the Edge Technical Report
2024.
@techreport{DBLP:journals/corr/abs-2401-12350,
title = {Scaling Up Quantization-Aware Neural Architecture Search for Efficient Deep Learning on the Edge},
author = {Yao Lu and Hiram Rayo Torres Rodriguez and Sebastian Vogel and Nick Waterlaat and Pavol Jancura},
url = {https://doi.org/10.48550/arXiv.2401.12350},
doi = {10.48550/ARXIV.2401.12350},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {CoRR},
volume = {abs/2401.12350},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gambella, Matteo; Pomponi, Jary; Scardapane, Simone; Roveri, Manuel
NACHOS: Neural Architecture Search for Hardware Constrained Early Exit Neural Networks Technical Report
2024.
@techreport{gambella2024nachos,
title = {NACHOS: Neural Architecture Search for Hardware Constrained Early Exit Neural Networks},
author = {Matteo Gambella and Jary Pomponi and Simone Scardapane and Manuel Roveri},
url = {https://arxiv.org/abs/2401.13330},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Cui, Suhan; Wang, Jiaqi; Zhong, Yuan; Liu, Han; Wang, Ting; Ma, Fenglong
Automated Fusion of Multimodal Electronic Health Records for Better Medical Predictions Technical Report
2024.
@techreport{cui2024automated,
title = {Automated Fusion of Multimodal Electronic Health Records for Better Medical Predictions},
author = {Suhan Cui and Jiaqi Wang and Yuan Zhong and Han Liu and Ting Wang and Fenglong Ma},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Sun, Yize; Wu, Zixin; Ma, Yunpu; Tresp, Volker
Quantum Architecture Search with Unsupervised Representation Learning Technical Report
2024.
@techreport{sun2024quantum,
title = {Quantum Architecture Search with Unsupervised Representation Learning},
author = {Yize Sun and Zixin Wu and Yunpu Ma and Volker Tresp},
url = {https://arxiv.org/abs/2401.11576},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xue, Yu; Zhang, Zhenman; Neri, Ferrante
Similarity surrogate-assisted evolutionary neural architecture search with dual encoding strategy Journal Article
In: Electronic Research Archive, vol. 32, no. 2, pp. 1017-1043, 2024, ISSN: 2688-1594.
@article{nokey,
title = {Similarity surrogate-assisted evolutionary neural architecture search with dual encoding strategy},
author = {Yu Xue and Zhenman Zhang and Ferrante Neri},
url = {https://www.aimspress.com/article/doi/10.3934/era.2024050},
doi = {10.3934/era.2024050},
issn = {2688-1594},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Electronic Research Archive},
volume = {32},
number = {2},
pages = {1017--1043},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lv, Zeqiong; Qian, Chao; Sun, Yanan
A First Step Towards Runtime Analysis of Evolutionary Neural Architecture Search Technical Report
2024.
@techreport{lv2024step,
title = {A First Step Towards Runtime Analysis of Evolutionary Neural Architecture Search},
author = {Zeqiong Lv and Chao Qian and Yanan Sun},
url = {https://arxiv.org/abs/2401.11712},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Xinlei; He, Mingshu; Wang, Jiaxuan; Wang, Xiaojuan
Towards Efficient Neural Networks Through Predictor-Assisted NSGA-III for Anomaly Traffic Detection of IoT Journal Article
In: IEEE Transactions on Cognitive Communications and Networking, pp. 1-1, 2024.
@article{10403928,
title = {Towards Efficient Neural Networks Through Predictor-Assisted NSGA-III for Anomaly Traffic Detection of IoT},
author = {Xinlei Wang and Mingshu He and Jiaxuan Wang and Xiaojuan Wang},
url = {https://ieeexplore.ieee.org/abstract/document/10403928},
doi = {10.1109/TCCN.2024.3355433},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Transactions on Cognitive Communications and Networking},
pages = {1-1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fuentes-Tomás, José Antonio; Acosta-Mesa, Héctor Gabriel; Mezura-Montes, Efrén; Jiménez, Rodolfo Hernandez
Neural Architecture Search for Placenta Segmentation in 2D Ultrasound Images Proceedings Article
In: Calvo, Hiram; Martínez-Villaseñor, Lourdes; Ponce, Hiram; Cabada, Ramón Zatarain; Rivera, Martín Montes; Mezura-Montes, Efrén (Ed.): Advances in Computational Intelligence. MICAI 2023 International Workshops, pp. 397–408, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-51940-6.
@inproceedings{10.1007/978-3-031-51940-6_30,
title = {Neural Architecture Search for Placenta Segmentation in 2D Ultrasound Images},
author = {José Antonio Fuentes-Tomás and Héctor Gabriel Acosta-Mesa and Efrén Mezura-Montes and Rodolfo Hernandez Jiménez},
editor = {Hiram Calvo and Lourdes Martínez-Villaseñor and Hiram Ponce and Ramón Zatarain Cabada and Martín Montes Rivera and Efrén Mezura-Montes},
url = {https://link.springer.com/chapter/10.1007/978-3-031-51940-6_30#citeas},
isbn = {978-3-031-51940-6},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {Advances in Computational Intelligence. MICAI 2023 International Workshops},
pages = {397--408},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Monitoring the placenta during pregnancy can lead to early diagnosis of anomalies by observing their characteristics, such as size, shape, and location. Ultrasound is a popular medical imaging technique used in placenta monitoring, whose advantages include the non-invasive feature, price, and accessibility. However, images from this domain are characterized by their noise. A segmentation system is required to recognize placenta features. U-Net architecture is a convolutional neural network that has become popular in the literature for medical image segmentation tasks. However, this type is a general-purpose network that requires great expertise to design and may only be applicable in some domains. The evolutionary computation overcomes this limitation, leading to the automatic design of convolutional neural networks. This work proposes a U-Net-based neural architecture search algorithm to construct convolutional neural networks applied in the placenta segmentation on 2D ultrasound images. The results show that the proposed algorithm allows a decrease in the number of parameters of U-Net, ranging from 80 to 98%. Moreover, the segmentation performance achieves a competitive level compared to U-Net, with a difference of 0.012 units in the Dice index.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kumar, Kapil; Verma, Kamal Kant
Comparative study on object detection in visual scenes using deep learning Journal Article
In: World Journal of Advanced Engineering Technology and Sciences, 2024.
@article{Kumar-wjaets23a,
title = {Comparative study on object detection in visual scenes using deep learning},
author = {Kapil Kumar and Kamal Kant Verma},
url = {https://wjaets.com/content/comparative-study-object-detection-visual-scenes-using-deep-learning},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {World Journal of Advanced Engineering Technology and Sciences},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mehrabian, Amir; Sabbaghian, Maryam; Yanikomeroglu, Halim
RL-Based Hyperparameter Selection for Spectrum Sensing With CNNs Journal Article
In: IEEE Transactions on Communications, pp. 1-1, 2024.
@article{10399938,
title = {RL-Based Hyperparameter Selection for Spectrum Sensing With CNNs},
author = {Amir Mehrabian and Maryam Sabbaghian and Halim Yanikomeroglu},
url = {https://ieeexplore.ieee.org/abstract/document/10399938},
doi = {10.1109/TCOMM.2024.3354204},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Transactions on Communications},
pages = {1-1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Anagolum, Sashwat; Alavisamani, Narges; Das, Poulami; Qureshi, Moinuddin; Kessler, Eric; Shi, Yunong
Élivágar: Efficient Quantum Circuit Search for Classification Technical Report
2024.
@techreport{anagolum2024elivagar,
title = {Élivágar: Efficient Quantum Circuit Search for Classification},
author = {Sashwat Anagolum and Narges Alavisamani and Poulami Das and Moinuddin Qureshi and Eric Kessler and Yunong Shi},
url = {https://arxiv.org/abs/2401.09393},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yang, An; Liu, Ying; Li, Chunguang; Ren, Qinyuan
Deeply Supervised Block-Wise Neural Architecture Search Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, 2024.
@article{Yang-itnnls24a,
title = {Deeply Supervised Block-Wise Neural Architecture Search},
author = {An Yang and Ying Liu and Chunguang Li and Qinyuan Ren},
url = {https://pubmed.ncbi.nlm.nih.gov/38231812/},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Qiao, Ye; Xu, Haocheng; Zhang, Yifan; Huang, Sitao
MicroNAS: Zero-Shot Neural Architecture Search for MCUs Technical Report
2024.
@techreport{qiao2024micronas,
title = {MicroNAS: Zero-Shot Neural Architecture Search for MCUs},
author = {Ye Qiao and Haocheng Xu and Yifan Zhang and Sitao Huang},
url = {https://arxiv.org/abs/2401.08996},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Raja, Muhammad Adil; Loughran, Róisín; Mccaffery, Fergal
Performance Analysis of YOLO-NAS SOTA Models on CAL Tool Detection Journal Article
In: 2024.
@article{Raja_2024,
title = {Performance Analysis of YOLO-NAS SOTA Models on CAL Tool Detection},
author = {Muhammad Adil Raja and Róisín Loughran and Fergal Mccaffery},
url = {http://dx.doi.org/10.36227/techrxiv.170474405.56692658/v1},
doi = {10.36227/techrxiv.170474405.56692658/v1},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhao, Jianwei; Li, Jie; Liu, Xin
Evolutionary Neural Architecture Search and Its Applications in Healthcare Journal Article
In: Computer Modeling in Engineering & Sciences, vol. 139, no. 1, pp. 143–185, 2024, ISSN: 1526-1506.
@article{cmes.2023.030391,
title = {Evolutionary Neural Architecture Search and Its Applications in Healthcare},
author = {Jianwei Zhao and Jie Li and Xin Liu},
url = {http://www.techscience.com/CMES/v139n1/55101},
doi = {10.32604/cmes.2023.030391},
issn = {1526-1506},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {Computer Modeling in Engineering & Sciences},
volume = {139},
number = {1},
pages = {143--185},
abstract = {Most of the neural network architectures are based on human experience, which requires a long and tedious trial-and-error process. Neural architecture search (NAS) attempts to detect effective architectures without human intervention. Evolutionary algorithms (EAs) for NAS can find better solutions than human-designed architectures by exploring a large search space for possible architectures. Using multiobjective EAs for NAS, optimal neural architectures that meet various performance criteria can be explored and discovered efficiently. Furthermore, hardware-accelerated NAS methods can improve the efficiency of the NAS. While existing reviews have mainly focused on different strategies to complete NAS, a few studies have explored the use of EAs for NAS. In this paper, we summarize and explore the use of EAs for NAS, as well as large-scale multiobjective optimization strategies and hardware-accelerated NAS methods. NAS performs well in healthcare applications, such as medical image analysis, classification of disease diagnosis, and health monitoring. EAs for NAS can automate the search process and optimize multiple objectives simultaneously in a given healthcare task. Deep neural network has been successfully used in healthcare, but it lacks interpretability. Medical data is highly sensitive, and privacy leaks are frequently reported in the healthcare industry. To solve these problems, in healthcare, we propose an interpretable neuroevolution framework based on federated learning to address search efficiency and privacy protection. Moreover, we also point out future research directions for evolutionary NAS. Overall, for researchers who want to use EAs to optimize NNs in healthcare, we analyze the advantages and disadvantages of doing so to provide detailed guidance, and propose an interpretable privacy-preserving framework for healthcare applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vu, Thanh
Toward Efficient and Robust Computer Vision for Large-Scale Edge Applications PhD Thesis
2024.
@phdthesis{Yu-PhD23a,
title = {Toward Efficient and Robust Computer Vision for Large-Scale Edge Applications},
author = {Thanh Vu},
url = {https://www.proquest.com/openview/9bb8722acfbd4abf1c7d8317a9d79342/1?pq-origsite=gscholar&cbl=18750&diss=y},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Feng, Jian; He, Yajie; Pan, Yuhan; Zhou, Zhipeng; Chen, Si; Gong, Wei
Enhancing Fitness Evaluation in Genetic Algorithm-Based Architecture Search for AI-Aided Financial Regulation Journal Article
In: IEEE Transactions on Evolutionary Computation, pp. 1-1, 2024.
@article{10388040,
title = {Enhancing Fitness Evaluation in Genetic Algorithm-Based Architecture Search for AI-Aided Financial Regulation},
author = {Jian Feng and Yajie He and Yuhan Pan and Zhipeng Zhou and Si Chen and Wei Gong},
url = {https://ieeexplore.ieee.org/abstract/document/10388040},
doi = {10.1109/TEVC.2024.3352239},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IEEE Transactions on Evolutionary Computation},
pages = {1-1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}