Maintained by Difan Deng and Marius Lindauer.
The following list collects papers related to neural architecture search. It is by no means complete. If a paper you are looking for is missing from the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI, and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
5555 (early access)
Zhu, Huijuan; Xia, Mengzhen; Wang, Liangmin; Xu, Zhicheng; Sheng, Victor S.
A Novel Knowledge Search Structure for Android Malware Detection Journal Article
In: IEEE Transactions on Services Computing, no. 01, pp. 1-14, 5555, ISSN: 1939-1374.
@article{10750332,
title = { A Novel Knowledge Search Structure for Android Malware Detection },
author = {Huijuan Zhu and Mengzhen Xia and Liangmin Wang and Zhicheng Xu and Victor S. Sheng},
url = {https://doi.ieeecomputersociety.org/10.1109/TSC.2024.3496333},
doi = {10.1109/TSC.2024.3496333},
issn = {1939-1374},
year = {5555},
date = {5555-11-01},
urldate = {5555-11-01},
journal = {IEEE Transactions on Services Computing},
number = {01},
pages = {1-14},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {While the Android platform is gaining explosive popularity, the amount of malicious software (malware) is also increasing sharply. Thus, numerous malware detection schemes based on deep learning have been proposed. However, they usually suffer from cumbersome models with complex architectures and enormous numbers of parameters, and typically require heavy computational power, which seriously limits their deployment in real application environments with limited resources (e.g., mobile edge devices). To surmount this challenge, we propose a novel Knowledge Distillation (KD) structure—Knowledge Search (KS). KS exploits Neural Architecture Search (NAS) to adaptively bridge the capability gap between teacher and student networks in KD by introducing a parallelized student-wise search approach. In addition, we carefully analyze the characteristics of malware and identify three cost-effective types of features closely related to malicious attacks, namely Application Programming Interfaces (APIs), permissions, and vulnerable components, to characterize Android Applications (Apps). Based on typical samples collected in recent years, we refine these features while exploiting the natural relationships between them, and construct corresponding datasets. Extensive experiments are conducted to investigate the effectiveness and sustainability of KS on these datasets. Our experimental results show that the proposed method detects Android malware with an accuracy of 97.89%, outperforming state-of-the-art solutions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
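The KS structure above pairs knowledge distillation with a per-student architecture search. For orientation on the distillation half only, here is a minimal sketch of a standard Hinton-style KD loss in PyTorch; this is not the authors' code, and the temperature T, weight alpha, and names are illustrative:

import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, labels, T=4.0, alpha=0.7):
    # Soft-target term: match the teacher's temperature-softened distribution.
    soft = F.kl_div(
        F.log_softmax(student_logits / T, dim=1),
        F.softmax(teacher_logits / T, dim=1),
        reduction="batchmean",
    ) * (T * T)
    # Hard-target term: ordinary cross-entropy against the true labels.
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard

In KS, NAS additionally shapes the student network that such a loss trains; the loss itself is the standard ingredient.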
Zhang, Feifei; Li, Mao; Ge, Jidong; Tang, Fenghui; Zhang, Sheng; Wu, Jie; Luo, Bin
Privacy-Preserving Federated Neural Architecture Search With Enhanced Robustness for Edge Computing Journal Article
In: IEEE Transactions on Mobile Computing, no. 01, pp. 1-18, 5555, ISSN: 1558-0660.
@article{10742476,
title = { Privacy-Preserving Federated Neural Architecture Search With Enhanced Robustness for Edge Computing },
author = {Feifei Zhang and Mao Li and Jidong Ge and Fenghui Tang and Sheng Zhang and Jie Wu and Bin Luo},
url = {https://doi.ieeecomputersociety.org/10.1109/TMC.2024.3490835},
doi = {10.1109/TMC.2024.3490835},
issn = {1558-0660},
year = {5555},
date = {5555-11-01},
urldate = {5555-11-01},
journal = {IEEE Transactions on Mobile Computing},
number = {01},
pages = {1-18},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {With the development of large-scale artificial intelligence services, edge devices are becoming essential providers of data and computing power. However, these edge devices are not immune to malicious attacks. Federated learning (FL), while protecting the privacy of decentralized data through secure aggregation, struggles to trace adversaries and lacks optimization for heterogeneity. We discover that FL augmented with Differentiable Architecture Search (DARTS) can improve resilience against backdoor attacks while remaining compatible with secure aggregation. Based on this, we propose a federated neural architecture search (NAS) framework named SLNAS. The architecture of SLNAS is built on three pivotal components: a server-side search space generation method that employs an evolutionary algorithm with dual encodings, a federated NAS process based on DARTS, and client-side architecture tuning that utilizes Gumbel softmax combined with knowledge distillation. To validate robustness, we adapt a framework that includes backdoor attacks based on trigger optimization, data poisoning, and model poisoning, targeting both model weights and architecture parameters. Extensive experiments demonstrate that SLNAS not only effectively counters advanced backdoor attacks but also handles heterogeneity, outperforming defense baselines across a wide range of backdoor attack scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
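The client-side tuning in SLNAS combines Gumbel softmax with knowledge distillation. As a reference point for the Gumbel-softmax part, a minimal sketch of the standard relaxation over candidate operations (the per-edge logits alpha, temperature tau, and op list are our illustrative assumptions):

import torch

def gumbel_softmax_weights(alpha, tau=1.0):
    # Sample Gumbel noise and relax the discrete op choice into soft weights.
    gumbel = -torch.log(-torch.log(torch.rand_like(alpha) + 1e-20) + 1e-20)
    return torch.softmax((alpha + gumbel) / tau, dim=-1)

def mixed_op(x, ops, alpha, tau=1.0):
    # Output of one edge: weighted sum of every candidate operation on x.
    w = gumbel_softmax_weights(alpha, tau)
    return sum(wi * op(x) for wi, op in zip(w, ops))

PyTorch also ships torch.nn.functional.gumbel_softmax, which implements the same relaxation.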
Song, Xiaotian; Xie, Jingrong; Feng, Yuqi; Song, Andy; Sun, Yanan
DAP: Domain Adaptive Performance Predictor for Efficient Neural Architecture Search Journal Article
In: IEEE Transactions on Computers, no. 01, pp. 1-12, 5555, ISSN: 1557-9956.
@article{11216133,
title = { DAP: Domain Adaptive Performance Predictor for Efficient Neural Architecture Search },
author = {Xiaotian Song and Jingrong Xie and Yuqi Feng and Andy Song and Yanan Sun},
url = {https://doi.ieeecomputersociety.org/10.1109/TC.2025.3624960},
doi = {10.1109/TC.2025.3624960},
issn = {1557-9956},
year = {5555},
date = {5555-10-01},
urldate = {5555-10-01},
journal = {IEEE Transactions on Computers},
number = {01},
pages = {1-12},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Neural architecture search (NAS) aims to automatically design high-performance architectures of deep neural networks, which have shown great potential in various fields. However, the search process of NAS is computationally expensive since plenty of deep neural networks must be trained on GPUs to obtain their performance. Performance predictors can directly estimate the performance of architectures without GPU-based training, and can thus overcome this barrier. However, the construction of performance predictors requires labeling plenty of architectures sampled from the corresponding NAS search space, which is still prohibitively costly. In this paper, we propose a Domain Adaptive performance Predictor (DAP), which can construct a performance predictor based on the labeled architectures provided by existing benchmarks and then transfer it to other search spaces via domain-adaptation techniques. To achieve this, we first propose a domain-agnostic feature extraction method to refine the domain-invariant features of neural architectures. Then, we propose a novel embedding method to learn the shared representations of architecture operations. Experimental results demonstrate that DAP outperforms eight baselines across six popular search spaces. Notably, we require a search cost of only 0.0002 GPU days to find an architecture with 77.10% top-1 accuracy on ImageNet and 97.86% on CIFAR-10. In addition, we show the theoretical upper bound of the generalization error in the target search space, further illustrating the generalizability of DAP. The source code is available at https://anonymous.4open.science/r/DAP-2F1F/.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
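A performance predictor in the sense used above is, at its core, a regressor from an architecture encoding to accuracy; DAP's contribution is making such a predictor transfer across search spaces. A generic, non-domain-adaptive baseline looks roughly like this (the 32-dimensional encoding and layer sizes are assumptions, not DAP's design):

import torch.nn as nn

predictor = nn.Sequential(
    nn.Linear(32, 64), nn.ReLU(),   # 32-dim architecture encoding (assumed)
    nn.Linear(64, 1),               # predicted validation accuracy
)

def train_step(opt, enc, acc):
    # enc: (batch, 32) encodings; acc: (batch,) measured accuracies.
    opt.zero_grad()
    loss = nn.functional.mse_loss(predictor(enc).squeeze(-1), acc)
    loss.backward()
    opt.step()
    return loss.item()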
Zhang, Yu-Ming; Hsieh, Jun-Wei; Lee, Chun-Chieh; Fan, Kuo-Chin
RATs-NAS: Redirection of Adjacent Trails on Graph Convolutional Networks for Predictor-based Neural Architecture Search Journal Article
In: IEEE Transactions on Artificial Intelligence, vol. 1, no. 01, pp. 1-11, 5555, ISSN: 2691-4581.
@article{10685480,
title = { RATs-NAS: Redirection of Adjacent Trails on Graph Convolutional Networks for Predictor-based Neural Architecture Search },
author = {Yu-Ming Zhang and Jun-Wei Hsieh and Chun-Chieh Lee and Kuo-Chin Fan},
url = {https://doi.ieeecomputersociety.org/10.1109/TAI.2024.3465433},
doi = {10.1109/TAI.2024.3465433},
issn = {2691-4581},
year = {5555},
date = {5555-09-01},
urldate = {5555-09-01},
journal = {IEEE Transactions on Artificial Intelligence},
volume = {1},
number = {01},
pages = {1-11},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Manually designed CNN architectures like VGG, ResNet, DenseNet, and MobileNet have achieved high performance across various tasks, but designing them is time-consuming and costly. Neural Architecture Search (NAS) automates the discovery of effective CNN architectures, reducing the need for experts. However, evaluating candidate architectures requires significant GPU resources, which has led to predictor-based NAS; graph convolutional networks (GCNs) are a popular option for constructing such predictors. However, we discover that, even though a GCN mimics the propagation of features through real architectures, the binary nature of its adjacency matrix limits its effectiveness. To address this, we propose Redirection of Adjacent Trails (RATs), which adaptively learns trail weights within the adjacency matrix. Our RATs-GCN outperforms other predictors by dynamically adjusting trail weights after each graph convolution layer. Additionally, the proposed Divide Search Sampling (DSS) strategy, based on the observation from cell-based NAS that architectures with similar FLOPs perform similarly, enhances search efficiency. Our RATs-NAS, which combines RATs-GCN and DSS, shows significant improvements over other predictor-based NAS methods on NASBench-101, NASBench-201, and NASBench-301.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
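The core RATs idea, as the abstract describes it, is to replace the fixed binary adjacency matrix of a GCN predictor with learnable trail weights. A rough sketch of one such layer (shapes, the sigmoid parameterization, and names are our illustrative choices, not the authors' implementation):

import torch
import torch.nn as nn

class WeightedAdjGCNLayer(nn.Module):
    def __init__(self, n_nodes, in_dim, out_dim, binary_adj):
        super().__init__()
        self.register_buffer("mask", binary_adj)            # which trails exist
        self.trail_logits = nn.Parameter(torch.zeros(n_nodes, n_nodes))
        self.lin = nn.Linear(in_dim, out_dim)

    def forward(self, h):
        # Learnable weights on existing trails; absent trails stay zero.
        adj = torch.sigmoid(self.trail_logits) * self.mask
        return torch.relu(self.lin(adj @ h))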
Chen, X.; Yang, C.
CIMNet: Joint Search for Neural Network and Computing-in-Memory Architecture Journal Article
In: IEEE Micro, no. 01, pp. 1-12, 5555, ISSN: 1937-4143.
@article{10551739,
title = {CIMNet: Joint Search for Neural Network and Computing-in-Memory Architecture},
author = {X. Chen and C. Yang},
url = {https://www.computer.org/csdl/magazine/mi/5555/01/10551739/1XyKBmSlmPm},
doi = {10.1109/MM.2024.3409068},
issn = {1937-4143},
year = {5555},
date = {5555-06-01},
urldate = {5555-06-01},
journal = {IEEE Micro},
number = {01},
pages = {1-12},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Computing-in-memory (CIM) architecture has been proven to effectively transcend the memory wall bottleneck, expanding the potential of low-power and high-throughput applications such as machine learning. Neural architecture search (NAS) designs ML models to meet a variety of accuracy, latency, and energy constraints. However, integrating CIM into NAS presents a major challenge due to the additional simulation overhead from the non-ideal characteristics of CIM hardware. This work introduces a quantization- and device-aware accuracy predictor that jointly scores quantization policy, CIM architecture, and neural network architecture, eliminating the need for time-consuming simulations in the search process. We also propose reducing the search space based on architectural observations, resulting in a well-pruned search space customized for CIM. These techniques allow for efficient exploration of superior combinations in mere CPU minutes. Our methodology yields CIMNet, which consistently improves the trade-off between accuracy and hardware efficiency on benchmarks, providing valuable architectural insights.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lv, Hao; Zhang, Lei; Wang, Ying
In-situ NAS: A Plug-and-Search Neural Architecture Search framework across hardware platforms Journal Article
In: IEEE Transactions on Computers, no. 01, pp. 1-14, 5555, ISSN: 1557-9956.
@article{11003207,
title = { In-situ NAS: A Plug-and-Search Neural Architecture Search framework across hardware platforms },
author = {Hao Lv and Lei Zhang and Ying Wang},
url = {https://doi.ieeecomputersociety.org/10.1109/TC.2025.3569161},
doi = {10.1109/TC.2025.3569161},
issn = {1557-9956},
year = {5555},
date = {5555-05-01},
urldate = {5555-05-01},
journal = {IEEE Transactions on Computers},
number = {01},
pages = {1-14},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Hardware-aware Neural Architecture Search (HW-NAS) has garnered significant research interest due to its ability to automate the design of neural networks for various hardware platforms. Prevalent HW-NAS frameworks often use fast predictors to estimate network performance, bypassing the time-consuming actual profiling step. However, the resource-intensive nature of building these predictors and their accuracy limitations hinder their practical use in diverse deployment scenarios. In response, we emphasize the indispensable role of actual profiling in HW-NAS and explore efficiency optimization possibilities within the HW-NAS framework. We provide a systematic analysis of profiling overhead in HW-NAS and identify many redundant and unnecessary operations during the search phase. We then optimize the workflow and present In-situ NAS, which leverages similarity features and exploration history to eliminate redundancy and improve runtime efficiency. In-situ NAS also offers simplified interfaces to ease the user’s effort in managing the complex device-dependent profiling flow, enabling plug-and-search functionality across diverse hardware platforms. Experimental results show that In-situ NAS achieves an average 10x speedup across different hardware platforms while reducing the search overhead by 8x compared to predictor-based approaches in various deployment scenarios. Additionally, In-situ NAS consistently discovers networks with better accuracy (about 1.5% higher) across diverse hardware platforms compared to predictor-based NAS.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
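Much of the redundancy In-situ NAS removes comes from re-profiling candidates that were already measured. The gist can be shown as a memoization layer over an on-device profiler (a hypothetical interface; profile_fn stands in for whatever measures latency on the target hardware):

class ProfilingCache:
    def __init__(self, profile_fn):
        self.profile_fn = profile_fn
        self.cache = {}

    def latency(self, arch):
        key = tuple(arch)             # architectures encoded as hashable tuples
        if key not in self.cache:     # hit real hardware only once per candidate
            self.cache[key] = self.profile_fn(arch)
        return self.cache[key]

The paper goes further, exploiting similarity between candidates and exploration history rather than exact-match caching alone.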
Siddique, Ayesha; Hoque, Khaza Anuarul
Explainable AI-Guided Neural Architecture Search for Adversarial Robustness in Approximate DNNs Journal Article
In: IEEE Transactions on Sustainable Computing, no. 01, pp. 1-15, 5555, ISSN: 2377-3782.
@article{10966055,
title = { Explainable AI-Guided Neural Architecture Search for Adversarial Robustness in Approximate DNNs },
author = {Ayesha Siddique and Khaza Anuarul Hoque},
url = {https://doi.ieeecomputersociety.org/10.1109/TSUSC.2025.3561603},
doi = {10.1109/TSUSC.2025.3561603},
issn = {2377-3782},
year = {5555},
date = {5555-04-01},
urldate = {5555-04-01},
journal = {IEEE Transactions on Sustainable Computing},
number = {01},
pages = {1-15},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Deep neural networks are lucrative targets of adversarial attacks and approximate deep neural networks (AxDNNs) are no exception. Searching manually for adversarially robust AxDNN architectures incurs an outrageous amount of time and human effort. In this paper, we propose XAI-NAS, an explainable neural architecture search (NAS) method that leverages explainable artificial intelligence (XAI) to efficiently co-optimize the adversarial robustness and hardware efficiency of AxDNN architectures on systolic-array hardware accelerators. During the NAS process, AxDNN architectures are evolved layer-wise with heterogeneous approximate multipliers to deliver the best trade-offs between adversarial robustness, energy consumption, latency, and memory footprint. The most suitable approximate multipliers are automatically selected from the open-source Evoapprox8b library. Our extensive evaluations provide a set of Pareto-optimal, hardware-efficient, and adversarially robust solutions. For example, a Pareto-optimal AxDNN for the MNIST and CIFAR-10 datasets exhibits up to 1.5× higher adversarial robustness, 2.1× less energy consumption, 4.39× reduced latency, and a 2.37× lower memory footprint compared to state-of-the-art NAS approaches.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dong, Yukang; Pan, Fanxing; Gui, Yi; Jiang, Wenbin; Wan, Yao; Zheng, Ran; Jin, Hai
Comprehensive Architecture Search for Deep Graph Neural Networks Journal Article
In: IEEE Transactions on Big Data, no. 01, pp. 1-15, 5555, ISSN: 2332-7790.
@article{10930718,
title = { Comprehensive Architecture Search for Deep Graph Neural Networks },
author = {Yukang Dong and Fanxing Pan and Yi Gui and Wenbin Jiang and Yao Wan and Ran Zheng and Hai Jin},
url = {https://doi.ieeecomputersociety.org/10.1109/TBDATA.2025.3552336},
doi = {10.1109/TBDATA.2025.3552336},
issn = {2332-7790},
year = {5555},
date = {5555-03-01},
urldate = {5555-03-01},
journal = {IEEE Transactions on Big Data},
number = {01},
pages = {1-15},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {In recent years, Neural Architecture Search (NAS) has emerged as a promising approach for automatically discovering superior model architectures for deep Graph Neural Networks (GNNs). Different methods have paid attention to different types of search spaces. However, due to the time-consuming nature of training deep GNNs, existing NAS methods often fail to explore diverse search spaces sufficiently, which constrains their effectiveness. To tackle this challenge, we propose CAS-DGNN, a novel comprehensive architecture search method for deep GNNs. It encompasses four kinds of search spaces: the composition of aggregate and update operators, different types of aggregate operators, residual connections, and hyper-parameters. To meet the needs of such a complex setting, a phased and hybrid search strategy is proposed to accommodate the diverse characteristics of the different search spaces. Specifically, we divide the search process into four phases, utilizing evolutionary algorithms and Bayesian optimization. Meanwhile, we design two distinct search methods for residual connections (All-connected search and Initial Residual search) to streamline the search space, which enhances the scalability of CAS-DGNN. The experimental results show that CAS-DGNN achieves higher accuracy with competitive search costs across ten public datasets compared to existing methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yan, J.; Liu, J.; Xu, H.; Wang, Z.; Qiao, C.
Peaches: Personalized Federated Learning with Neural Architecture Search in Edge Computing Journal Article
In: IEEE Transactions on Mobile Computing, no. 01, pp. 1-17, 5555, ISSN: 1558-0660.
@article{10460163,
title = {Peaches: Personalized Federated Learning with Neural Architecture Search in Edge Computing},
author = {J. Yan and J. Liu and H. Xu and Z. Wang and C. Qiao},
doi = {10.1109/TMC.2024.3373506},
issn = {1558-0660},
year = {5555},
date = {5555-03-01},
urldate = {5555-03-01},
journal = {IEEE Transactions on Mobile Computing},
number = {01},
pages = {1-17},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {In edge computing (EC), federated learning (FL) enables numerous distributed devices (or workers) to collaboratively train AI models without exposing their local data. Most FL works adopt a predefined architecture on all participating workers for model training. However, since workers' local data distributions vary heavily in EC, the predefined architecture may not be the optimal choice for every worker. It is also unrealistic to manually design a high-performance architecture for each worker, which requires intense human expertise and effort. In order to tackle this challenge, neural architecture search (NAS) has been applied in FL to automate the architecture design process. Unfortunately, existing federated NAS frameworks often suffer from the difficulties of system heterogeneity and resource limitation. To remedy this problem, we present a novel framework, termed Peaches, to achieve efficient searching and training in resource-constrained EC systems. Specifically, the local model of each worker is stacked from a base cell and a personal cell, where the base cell is shared by all workers to capture common knowledge and the personal cell is customized for each worker to fit its local data. We determine the number of base cells, shared by all workers, according to the bandwidth budget of the parameter server. Besides, to accommodate data and system heterogeneity, we find the optimal number of personal cells for each worker based on its computing capability. In addition, we gradually prune the search space during training to reduce resource consumption. We evaluate the performance of Peaches through extensive experiments; the results show that Peaches achieves an average accuracy improvement of about 6.29% and a speedup of up to 3.97× compared with the baselines.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
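The worker model layout described above, shared base cells followed by per-worker personal cells, can be pictured in a few lines of PyTorch (make_cell, the cell counts, and the flat stacking are illustrative stand-ins for the paper's searched cells):

import torch.nn as nn

def make_worker_model(n_base, n_personal, make_cell):
    base = nn.Sequential(*[make_cell() for _ in range(n_base)])          # synced via FL
    personal = nn.Sequential(*[make_cell() for _ in range(n_personal)])  # kept local
    return nn.Sequential(base, personal)

Only the base part would be aggregated by the server; the personal part never leaves the worker.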
Sun, Genchen; Liu, Zhengkun; Gan, Lin; Su, Hang; Li, Ting; Zhao, Wenfeng; Sun, Biao
SpikeNAS-Bench: Benchmarking NAS Algorithms for Spiking Neural Network Architecture Journal Article
In: IEEE Transactions on Artificial Intelligence, vol. 1, no. 01, pp. 1-12, 5555, ISSN: 2691-4581.
@article{10855683,
title = { SpikeNAS-Bench: Benchmarking NAS Algorithms for Spiking Neural Network Architecture },
author = {Genchen Sun and Zhengkun Liu and Lin Gan and Hang Su and Ting Li and Wenfeng Zhao and Biao Sun},
url = {https://doi.ieeecomputersociety.org/10.1109/TAI.2025.3534136},
doi = {10.1109/TAI.2025.3534136},
issn = {2691-4581},
year = {5555},
date = {5555-01-01},
urldate = {5555-01-01},
journal = {IEEE Transactions on Artificial Intelligence},
volume = {1},
number = {01},
pages = {1-12},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {In recent years, Neural Architecture Search (NAS) has marked significant advancements, yet its efficacy is marred by the dependence on substantial computational resources. To mitigate this, the development of NAS benchmarks has emerged, offering datasets that enumerate all potential network architectures and their performances within a predefined search space. Nonetheless, these benchmarks predominantly focus on convolutional architectures, which are criticized for their limited interpretability and suboptimal hardware efficiency. Recognizing the untapped potential of Spiking Neural Networks (SNNs) — often hailed as the third generation of neural networks for their biological realism and computational thrift — this study introduces SpikeNAS-Bench. As a pioneering benchmark for SNNs, SpikeNAS-Bench utilizes a cell-based search space, integrating leaky integrate-and-fire (LIF) neurons with variable thresholds as candidate operations. It encompasses 15,625 candidate architectures, rigorously evaluated on the CIFAR10, CIFAR100, and Tiny-ImageNet datasets. This paper delves into the architectural nuances of SpikeNAS-Bench, leveraging various criteria to underscore the benchmark’s utility and presenting insights that could steer future NAS algorithm designs. Moreover, we assess the benchmark’s consistency through three distinct proxy types: zero-cost-based, early-stop-based, and predictor-based proxies. Additionally, the paper benchmarks seven contemporary NAS algorithms to attest to SpikeNAS-Bench’s broad applicability. We commit to providing training logs and diagnostic data for all candidate architectures, and to releasing all code and datasets post-acceptance, aiming to catalyze further exploration and innovation within the SNN domain. SpikeNAS-Bench is open source at https://github.com/XXX (hidden for double anonymous review).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
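SpikeNAS-Bench builds its cells from leaky integrate-and-fire (LIF) neurons with variable thresholds. For readers unfamiliar with the primitive, a textbook discrete-time LIF update looks like this (tau and v_th are illustrative; the benchmark's exact formulation may differ):

import torch

def lif_step(v, x, tau=2.0, v_th=1.0):
    v = v + (x - v) / tau           # leaky integration of input current x
    spike = (v >= v_th).float()     # emit a spike where the threshold is crossed
    v = v * (1.0 - spike)           # hard reset of neurons that fired
    return spike, v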
Li, Changlin; Lin, Sihao; Tang, Tao; Wang, Guangrun; Li, Mingjie; Li, Zhihui; Chang, Xiaojun
BossNAS Family: Block-wisely Self-supervised Neural Architecture Search Journal Article
In: IEEE Transactions on Pattern Analysis & Machine Intelligence, no. 01, pp. 1-15, 5555, ISSN: 1939-3539.
@article{10839629,
title = { BossNAS Family: Block-wisely Self-supervised Neural Architecture Search },
author = {Changlin Li and Sihao Lin and Tao Tang and Guangrun Wang and Mingjie Li and Zhihui Li and Xiaojun Chang},
url = {https://doi.ieeecomputersociety.org/10.1109/TPAMI.2025.3529517},
doi = {10.1109/TPAMI.2025.3529517},
issn = {1939-3539},
year = {5555},
date = {5555-01-01},
urldate = {5555-01-01},
journal = {IEEE Transactions on Pattern Analysis & Machine Intelligence},
number = {01},
pages = {1-15},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Recent advances in hand-crafted neural architectures for visual recognition underscore the pressing need to explore architecture designs comprising diverse building blocks. Concurrently, neural architecture search (NAS) methods have gained traction as a means to alleviate human efforts. Nevertheless, whether NAS methods can efficiently and effectively manage diversified search spaces featuring disparate candidates, such as Convolutional Neural Networks (CNNs) and transformers, remains an open question. In this work, we introduce a novel unsupervised NAS approach called BossNAS (Block-wisely Self-supervised Neural Architecture Search), which aims to address the problem of inaccurate predictive architecture ranking caused by a large weight-sharing space while mitigating the potential ranking issue caused by biased supervision. To achieve this, we factorize the search space into blocks and introduce a novel self-supervised training scheme called Ensemble Bootstrapping, to train each block separately in an unsupervised manner. In the search phase, we propose an unsupervised Population-Centric Search, optimizing the candidate architecture towards the population center. Additionally, we enhance our NAS method by integrating masked image modeling and present BossNAS++ to overcome the lack of dense supervision in our block-wise self-supervised NAS. In BossNAS++, we introduce the training technique named Masked Ensemble Bootstrapping for the block-wise supernet, accompanied by a Masked Population-Centric Search scheme to promote fairer architecture selection. Our family of models, discovered through BossNAS and BossNAS++, delivers impressive results across various search spaces and datasets. The transformer model discovered by BossNAS++ attains a remarkable accuracy of 83.2% on ImageNet with only 10.5B MAdds, surpassing DeiT-B by 1.4% while maintaining a lower computation cost. Moreover, our approach excels in architecture rating accuracy, achieving Spearman correlations of 0.78 and 0.76 on the canonical MBConv search space with ImageNet and the NATS-Bench size search space with CIFAR-100, respectively, outperforming state-of-the-art NAS methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2026
Wu, Yu; Fan, Hailong; Zhou, Zekun; Ying, Weiqin; Wu, Zhenfeng; Zhao, Jinhuai; Yu, Fahong
Federated progressive evolutionary neural architecture search with three-phase acceleration for privacy-preserving medical image segmentation Journal Article
In: Applied Soft Computing, vol. 186, pp. 114146, 2026, ISSN: 1568-4946.
@article{WU2026114146,
title = {Federated progressive evolutionary neural architecture search with three-phase acceleration for privacy-preserving medical image segmentation},
author = {Yu Wu and Hailong Fan and Zekun Zhou and Weiqin Ying and Zhenfeng Wu and Jinhuai Zhao and Fahong Yu},
url = {https://www.sciencedirect.com/science/article/pii/S1568494625014590},
doi = {https://doi.org/10.1016/j.asoc.2025.114146},
issn = {1568-4946},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Applied Soft Computing},
volume = {186},
pages = {114146},
abstract = {Deep neural networks (DNNs) for medical image segmentation often require private data from multiple medical institutions, but privacy concerns limit data sharing. Federated learning (FL) offers a viable alternative to train networks across distributed private data. However, predefined network architectures for FL are often not optimal and require extensive manual tuning. Traditional neural architecture search (NAS) methods are unsuitable for FL due to the risk of privacy disclosure and high communication and evaluation costs. This paper proposes a federated progressive evolutionary NAS (FP-ENAS) method with three-phase acceleration to automatically generate neural architectures for privacy-preserving medical image segmentation. FP-ENAS constructs a tailored supernet and search space by enhancing U-Net++ with depthwise separable convolutions and adaptable skip connections. Then, FP-ENAS leverages a multi-objective evolutionary algorithm to search for its subnets with higher segmentation accuracy and fewer parameters. FP-ENAS adopts a three-phase hierarchical progressive acceleration mechanism to significantly reduce the communication and evaluation costs of federated NAS. Specifically, the mechanism employs a federated weight-sharing strategy, a federated low-fidelity evaluation strategy, and a federated online surrogate prediction strategy in the early, middle, and late phases, respectively, to balance the efficiency and accuracy of evaluations. This method accelerates the search process while maintaining high performance and ensuring privacy protection. Experimental results on the retinal vessel, brain tumor, and skin lesion segmentation datasets show that FP-ENAS automatically and efficiently generates multiple excellent architectures of different sizes, which outperform several popular hand-crafted networks in terms of segmentation accuracy and model size for privacy-preserving medical image segmentation. Compared with the standard U-Net, the architectures generated by FP-ENAS yield improvements of up to 2.27 and 1.94 percentage points, respectively, in the DICE score on the retinal vessel and brain tumor datasets, while requiring 31.1 % and 17.7 % fewer parameters.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xu, Qianao; Hu, Ziyu; Sun, Hao; Wei, Lixin; Dai, Yan; Zhang, Kexin
DDMM: Dual-criterion dynamic migration and multi-objective architecture search for the multi-output dendritic neuron model Journal Article
In: Expert Systems with Applications, vol. 299, pp. 130069, 2026, ISSN: 0957-4174.
@article{XU2026130069,
title = {DDMM: Dual-criterion dynamic migration and multi-objective architecture search for the multi-output dendritic neuron model},
author = {Qianao Xu and Ziyu Hu and Hao Sun and Lixin Wei and Yan Dai and Kexin Zhang},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425036851},
doi = {https://doi.org/10.1016/j.eswa.2025.130069},
issn = {0957-4174},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Expert Systems with Applications},
volume = {299},
pages = {130069},
abstract = {The multi-input multi-output dendritic neuron model, as an artificial neural network, has achieved remarkable results in handling nonlinear classification problems. However, it not only requires training a filtering matrix for pruning redundant dendrites but also needs to optimize structural parameters. Therefore, a multi-output dendritic neuron model is designed that retains the original soma layer of the dendritic neuron model and discards the filtering matrix, enabling holistic multi-objective optimization of the model. To address insufficient population diversity and the tendency to overfit during model optimization, a dual-criterion dynamic migration multi-objective evolutionary algorithm is proposed. In this algorithm, the original population is divided into three subpopulations, and computational resources are allocated based on two optimization criteria. A penalty mechanism and a migration strategy are then employed to enhance population diversity and exchange elite individuals across subpopulations. The multi-output dendritic neuron model architecture casts the structural search as a large-scale multi-objective optimization problem, which the dual-criterion dynamic migration multi-objective evolutionary algorithm is specifically designed to address. Experimental results on 11 public datasets demonstrate that the proposed algorithm achieves improvements of 27.48 % in HV, 26.33 % in IGD, and 6.9 % in classification accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Qiuying; Chen, Zhixiang; Li, Yu; Jiang, Zhiyuan; Cao, Shan
Efficient network compression via gradient-score aware pruning Journal Article
In: Neurocomputing, vol. 660, pp. 131870, 2026, ISSN: 0925-2312.
@article{LI2026131870,
title = {Efficient network compression via gradient-score aware pruning},
author = {Qiuying Li and Zhixiang Chen and Yu Li and Zhiyuan Jiang and Shan Cao},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225025421},
doi = {https://doi.org/10.1016/j.neucom.2025.131870},
issn = {0925-2312},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Neurocomputing},
volume = {660},
pages = {131870},
abstract = {Convolutional neural networks (CNNs) have demonstrated significant achievements in the field of computer vision, yet their high computational demands restrict practical applications. Current pruning methods seek to mitigate this issue but often rely on heuristic manual approaches and encounter challenges in maintaining both significant model compression and accuracy. To address these issues, a fast neural architecture search pruning (FNP) technique is proposed in this paper. Firstly, an importance matrix (IM) based preprocessing stage efficiently removes redundant structures by considering both weight importance and computational complexity, providing a compact baseline for subsequent pruning. Secondly, we adapt fast genetic algorithms (FGA) to identify optimally pruned model configurations. Furthermore, to accelerate the search process, we utilize a zero-shot learning approach to estimate model performance with the score of the frame (SoF), a gradient-based score. Compared with state-of-the-art (SOTA) pruning techniques, FNP demonstrates superior performance in terms of search duration and compression ratio. On the CIFAR-10 dataset, our method removes 95.24 % of the parameters in VGG-16 while achieving a 0.72 % accuracy improvement over the baseline. On the ImageNet dataset, we prune 68.98 % of the parameters in ResNet-50 and obtain a 1.2 % accuracy improvement over SOTA approaches, while reducing the search time by 98.94 %. The code is available at https://github.com/aqiu1222/FNP.git},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
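The IM preprocessing stage above ranks structures by weight importance and computational complexity. A magnitude-only simplification of such filter scoring (the paper's importance matrix also folds in computational cost; the keep ratio and names are ours):

import torch

def prune_filters(conv_weight, keep_ratio=0.5):
    # conv_weight: (out_channels, in_channels, kH, kW)
    scores = conv_weight.abs().sum(dim=(1, 2, 3))          # L1 importance per filter
    k = max(1, int(keep_ratio * scores.numel()))
    keep = torch.topk(scores, k).indices.sort().values     # indices of kept filters
    return conv_weight[keep], keep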
Clercq, Johannes De; Pillay, Nelishia
A selection perturbative hyper-heuristic for neural architecture search Journal Article
In: Neural Networks, vol. 195, pp. 108259, 2026, ISSN: 0893-6080.
@article{DECLERCQ2026108259,
title = {A selection perturbative hyper-heuristic for neural architecture search},
author = {Johannes De Clercq and Nelishia Pillay},
url = {https://www.sciencedirect.com/science/article/pii/S0893608025011402},
doi = {https://doi.org/10.1016/j.neunet.2025.108259},
issn = {0893-6080},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Neural Networks},
volume = {195},
pages = {108259},
abstract = {Neural architecture search explores the architecture space, referred to as the design space, to find an architecture that produces good results. Various approaches, such as genetic algorithms, are usually used to explore this space. This study investigates exploring an alternative space, namely the heuristic space, using a hyper-heuristic to indirectly explore the design space. The study introduces the concept of a NAS operator space (NOS). A single-point selection perturbative hyper-heuristic (SPHH-NAS) explores a heuristic space that maps to the NOS, which in turn maps to the design space. A choice function is used for heuristic selection and Adaptive Improvement Limited Target Acceptance (AILTA) for move acceptance. It is anticipated that indirectly searching the design space will facilitate reaching areas of the search space that could not be reached by searching the space directly. SPHH-NAS was evaluated on three NAS benchmark sets, namely NAS-101, NAS-201 and NAS-301, and additionally on two real-world datasets. SPHH-NAS was found to outperform the majority of previous approaches used to solve these problems, while also reducing computational cost.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
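The choice function used above for heuristic selection is a classic hyper-heuristic rule: score each low-level heuristic by a weighted mix of its recent improvement and the time since it was last applied, then pick the best. A minimal sketch of that standard formulation (the weights and bookkeeping are illustrative, not the paper's):

import time

def choice_function(stats, w1=1.0, w2=1.0):
    # stats[h] = {"improvement": average recent gain, "last_used": timestamp}
    now = time.time()
    def score(h):
        s = stats[h]
        return w1 * s["improvement"] + w2 * (now - s["last_used"])
    return max(stats, key=score)

The second term keeps rarely used heuristics from starving, which is what lets the indirect search reach regions a single operator cannot.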
Ye, Tingyu; Zhang, Ping; Zeng, Hongliang; Wang, Jiahua
Multi-objective neural architecture search combining binary artificial bee colony algorithm for dynamic hand gesture recognition Journal Article
In: Expert Systems with Applications, vol. 299, pp. 130128, 2026, ISSN: 0957-4174.
@article{YE2026130128,
title = {Multi-objective neural architecture search combining binary artificial bee colony algorithm for dynamic hand gesture recognition},
author = {Tingyu Ye and Ping Zhang and Hongliang Zeng and Jiahua Wang},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425037431},
doi = {https://doi.org/10.1016/j.eswa.2025.130128},
issn = {0957-4174},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Expert Systems with Applications},
volume = {299},
pages = {130128},
abstract = {Designing neural network architectures for dynamic hand gesture recognition (DHGR) requires a careful balance between recognition accuracy and computational efficiency, particularly for real-time interaction on mobile or embedded platforms. To address this challenge, we propose MONAS_ABC, a multi-objective neural architecture search (NAS) framework based on the binary artificial bee colony algorithm. The framework incorporates tailored strategies across the employed, onlooker, and scout bee phases, enabling efficient exploration in a MobileNetV2-inspired lightweight search space through binary-encoded representations. We first evaluate MONAS_ABC on two multi-objective optimization benchmarks, C10/MOP and IN1K/MOP, where it demonstrates superior convergence behavior and solution diversity compared to conventional approaches. We further apply the framework to two real-world DHGR datasets: EgoGesture and NvGesture. On EgoGesture, MONAS_ABC achieves a Top-1 accuracy of 93.17 % with only 0.76G FLOPs and 2.05 million parameters, significantly outperforming established 3D CNN models such as C3D and TSM in both accuracy and resource consumption. Comparable performance is observed on NvGesture, confirming the generalizability of the discovered architectures. These results collectively demonstrate that MONAS_ABC effectively discovers scalable and efficient architectures, capable of balancing performance and complexity across both generic optimization problems and practical DHGR scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lin, Kezhen; Zhang, Yu; Hu, Wang; Wang, Heng; Hu, Junjie
Encoder-driven dynamically sampling NAS for key region re-identification in re-shot welding radiographs retrieval Journal Article
In: Applied Soft Computing, vol. 186, pp. 114098, 2026, ISSN: 1568-4946.
@article{LIN2026114098,
title = {Encoder-driven dynamically sampling NAS for key region re-identification in re-shot welding radiographs retrieval},
author = {Kezhen Lin and Yu Zhang and Wang Hu and Heng Wang and Junjie Hu},
url = {https://www.sciencedirect.com/science/article/pii/S1568494625014115},
doi = {https://doi.org/10.1016/j.asoc.2025.114098},
issn = {1568-4946},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Applied Soft Computing},
volume = {186},
pages = {114098},
abstract = {In non-destructive testing, the retrieval of re-shot welding radiographs aims to curb fraudulent practices by preventing the substitution of defective welds with re-shot qualified radiographs. The comparison of weld features is complicated due to changes in camera angles and shooting parameters. A key region re-identification method for re-shot welding radiograph retrieval is introduced in this paper. This approach re-identifies robust key regions across different shooting positions, then compares and arranges them based on their spatial positioning, proving effective for retrieving re-shot welding radiographs. To further enhance the key-region metric, neural architecture search (NAS) is used to find network architectures suitable for feature extraction from welding radiographs. For network encoding, a posterior-tuned strategy is proposed, adjusting the decoder’s output distribution using its posterior distribution to ensure uniformity and boundary conditions in latent space mapping. For network evaluation, an efficient dynamically sampling NAS (DS-NAS) framework is introduced, including an improved triplet loss function, the intra-entity perception triplet loss (IePT loss), and an effective method for selecting hard samples. DS-NAS dynamically adjusts the difficulty of sample selection based on model performance, improving the efficiency and accuracy of network architecture evaluation in NAS. The proposed DS-NAS framework found a high-performing network within 3.6 GPU-days. The resulting architecture achieved a significantly superior 69.66 mAP on the key region retrieval dataset, far surpassing the baseline. Furthermore, when applied to the re-shot welding radiograph retrieval dataset using the key region-based retrieval approach, it achieved 62.04 mAP, enabling reliable identification of re-shot welding radiographs.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
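The IePT loss above is a variant of the standard triplet loss from metric learning. For reference, the textbook form it modifies (the margin value is illustrative):

import torch.nn.functional as F

def triplet_loss(anchor, positive, negative, margin=0.2):
    d_pos = F.pairwise_distance(anchor, positive)
    d_neg = F.pairwise_distance(anchor, negative)
    return F.relu(d_pos - d_neg + margin).mean()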
Terin, Rodrigo Carmo; Arenas, Zochil González; Santana, Roberto
Identifying Phase Transitions in the Classical Ising Model with Neural Networks: A Neural Architecture Search Perspective Proceedings Article
In: Márquez, Fausto Pedro García; Hameed, Alaa Ali; Jamil, Akhtar (Ed.): Pattern Recognition and Artificial Intelligence, pp. 741–756, Springer Nature Switzerland, Cham, 2026, ISBN: 978-3-031-90893-4.
@inproceedings{10.1007/978-3-031-90893-4_50,
title = {Identifying Phase Transitions in the Classical Ising Model with Neural Networks: A Neural Architecture Search Perspective},
author = {Rodrigo Carmo Terin and Zochil González Arenas and Roberto Santana},
editor = {Fausto Pedro García Márquez and Alaa Ali Hameed and Akhtar Jamil},
url = {https://link.springer.com/chapter/10.1007/978-3-031-90893-4_50},
isbn = {978-3-031-90893-4},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
booktitle = {Pattern Recognition and Artificial Intelligence},
pages = {741–756},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {The use of machine learning algorithms to investigate phase transitions in physical systems is a valuable way to better understand the characteristics of these systems. Neural networks have been used to extract information about phases and phase transitions directly from many-body configurations. However, one limitation of neural networks is that they require the model architecture and parameters to be defined prior to their application, and this determination is itself a difficult problem. In this paper, we investigate for the first time the relationship between the accuracy of neural networks in extracting phase information and the network configuration (comprising the architecture and hyperparameters). We formulate the phase analysis as a regression task, address the question of generating data that reflects the different states of the physical system, and evaluate the performance of neural architecture search for this task. After obtaining the optimized architectures, we further implement smart data processing and analytics by means of neuron coverage metrics, assessing the capability of these metrics to estimate phase transitions. Our results identify the neuron coverage metric as promising for detecting phase transitions in physical systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
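Neuron coverage, the metric the study above leans on, is commonly defined as the fraction of hidden units that activate above a threshold over a set of inputs. A minimal sketch of that generic definition (threshold and aggregation are illustrative; the paper's variant may differ):

import torch

def neuron_coverage(activations, threshold=0.0):
    # activations: (batch, n_neurons) hidden-layer outputs over a probe set
    active = (activations > threshold).any(dim=0)   # did each neuron ever fire?
    return active.float().mean().item()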
Neri, Ferrante; Liao, Lingzhen; Xue, Yu; Basgalupp, Márcio P.
Differentiable architecture search for GANs with rollback mechanism Journal Article
In: Applied Soft Computing, vol. 186, pp. 114055, 2026, ISSN: 1568-4946.
@article{NERI2026114055,
title = {Differentiable architecture search for GANs with rollback mechanism},
author = {Ferrante Neri and Lingzhen Liao and Yu Xue and Márcio P. Basgalupp},
url = {https://www.sciencedirect.com/science/article/pii/S1568494625013687},
doi = {https://doi.org/10.1016/j.asoc.2025.114055},
issn = {1568-4946},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Applied Soft Computing},
volume = {186},
pages = {114055},
abstract = {Designing effective architectures for Generative Adversarial Networks (GANs) remains a challenging task due to training instability and the complexity of balancing generator and discriminator performance. While Neural Architecture Search (NAS) has shown promise in automating architecture design, existing NAS-GAN approaches often suffer from limited design flexibility and high computational demands. This paper introduces a gradient-based NAS framework, termed Differentiable Architecture Search for GANs with Rollback Mechanism (RASGAN), aimed at addressing these limitations. RASGAN incorporates a hyperparameter rollback to indirectly optimise evaluation metrics such as the Inception Score (IS) and Fréchet Inception Distance (FID), leading to higher-quality generative models. Moreover, the search space integrates lightweight convolutional operations to reduce computational and storage overhead without compromising performance. On unconditional image generation tasks, the proposed method achieves competitive results: on CIFAR-10, RASGAN attains IS = 8.98 and FID = 10.31; on STL-10},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Qu, Wei; Chen, Cong; Lu, Wei; Chen, Haodong; Li, Tao
Towards trustworthy and interpretable prediction of school bullying: A NAS-driven framework with Shapley value explanation Journal Article
In: Neurocomputing, vol. 659, pp. 131744, 2026, ISSN: 0925-2312.
@article{QU2026131744,
title = {Towards trustworthy and interpretable prediction of school bullying: A NAS-driven framework with Shapley value explanation},
author = {Wei Qu and Cong Chen and Wei Lu and Haodong Chen and Tao Li},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225024166},
doi = {https://doi.org/10.1016/j.neucom.2025.131744},
issn = {0925-2312},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Neurocomputing},
volume = {659},
pages = {131744},
abstract = {School bullying represents a critical societal challenge with lasting psychological and academic consequences for affected students. Despite recent advances in machine learning for predicting bullying behaviors, conventional models struggle to capture the complex, non-linear relationships among contributing factors, especially under the imbalanced data distributions typical of real-world bullying cases. Furthermore, the inherent opacity of Deep Neural Networks (DNNs) restricts their application in educational contexts where interpretability and actionable insights are essential. In this paper, we propose a novel automated framework that integrates Neural Architecture Search (NAS) with Shapley value-based explanation methods to jointly address performance and interpretability challenges. Our framework automatically identifies optimal DNN architectures tailored for bullying prediction, incorporating mechanisms to handle class imbalance without extensive manual tuning. To address model transparency, we employ a Shapley value analysis pipeline that systematically attributes predictions to key risk factors, offering educators and policymakers principled and quantitative insights. Extensive experiments on publicly available datasets demonstrate that our method significantly outperforms state-of-the-art baselines, achieving notable improvements in Accuracy (+2.58 %), F1-Score (+34.52 %), and AUC (+6.47 %). Importantly, the feature importance rankings from our Shapley analysis closely align with established sociological and educational theories on bullying, affirming the model’s interpretability and practical relevance. Cross-dataset validation further verifies the framework’s generalizability to broader youth behavioral risk prediction tasks. Our code is available at https://github.com/clsyc/Bullyingshapley.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
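The Shapley analysis above attributes a prediction to features by averaging each feature's marginal contribution over all coalitions. For small feature counts this is computable exactly; practical pipelines approximate it (for instance with the SHAP library). A brute-force sketch of the definition, where value_fn is an assumed payoff function over feature subsets:

from itertools import combinations
from math import factorial

def shapley_values(value_fn, n_features):
    # value_fn(subset: frozenset) -> model payoff with only `subset` present.
    phi = [0.0] * n_features
    for i in range(n_features):
        others = [j for j in range(n_features) if j != i]
        for r in range(len(others) + 1):
            for S in combinations(others, r):
                w = (factorial(len(S)) * factorial(n_features - len(S) - 1)
                     / factorial(n_features))
                phi[i] += w * (value_fn(frozenset(S) | {i}) - value_fn(frozenset(S)))
    return phi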
Wang, Hongzhi; Wang, Chunnan; Song, Xintong; Geng, Fei
BufferNAS: Buffer pool sampling in neural architecture search Journal Article
In: Information Sciences, vol. 726, pp. 122731, 2026, ISSN: 0020-0255.
@article{WANG2026122731,
title = {BufferNAS: Buffer pool sampling in neural architecture search},
author = {Hongzhi Wang and Chunnan Wang and Xintong Song and Fei Geng},
url = {https://www.sciencedirect.com/science/article/pii/S0020025525008679},
doi = {https://doi.org/10.1016/j.ins.2025.122731},
issn = {0020-0255},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Information Sciences},
volume = {726},
pages = {122731},
abstract = {Existing one-shot NAS algorithms can efficiently deal with the automatic neural architecture search problem and have achieved good performance in real applications. However, they generally adopt a fixed or hand-crafted sampling strategy for hypernetwork training, which makes the sampling procedure inefficient and reduces their performance. In this paper, we aim to solve this problem and propose a biased sampling strategy based on a buffer pool, named buffer pool neural architecture search (BufferNAS). BufferNAS changes the sampling frequency so that hypernetwork training is biased towards better sub-networks. In BufferNAS, the advantages of excellent sub-networks are amplified, making it easier to find better neural architectures during the training and search processes. We embed our buffer sampling approach into an existing one-shot approach and achieve good results: BufferNAS obtains 97.4 % accuracy with 3.4 M parameters on the CIFAR-10 dataset. By improving the sampling strategy, our method exhibits better performance on image classification tasks.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
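The biased sampling idea in BufferNAS can be pictured as a small pool of the best-evaluated sub-networks that supernet training revisits more often than fresh random candidates. A sketch of that mechanism (pool size and sampling probability are our illustrative settings, not the paper's):

import random

class BufferPool:
    def __init__(self, capacity=50, p_buffer=0.5):
        self.pool, self.capacity, self.p_buffer = [], capacity, p_buffer

    def add(self, arch, score):
        self.pool.append((score, arch))
        self.pool.sort(key=lambda t: -t[0])        # best first
        del self.pool[self.capacity:]              # keep only the top candidates

    def sample(self, random_arch_fn):
        if self.pool and random.random() < self.p_buffer:
            return random.choice(self.pool)[1]     # bias toward strong sub-networks
        return random_arch_fn()                    # otherwise keep exploring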
Sun, Xiaoxue; Wang, Hongpeng; Song, Pei-Cheng
Compact Training-Free NAS with Alternating Evolution Game for Medical Image Segmentation Proceedings Article
In: Gee, James C.; Alexander, Daniel C.; Hong, Jaesung; Iglesias, Juan Eugenio; Sudre, Carole H.; Venkataraman, Archana; Golland, Polina; Kim, Jong Hyo; Park, Jinah (Ed.): Medical Image Computing and Computer Assisted Intervention – MICCAI 2025, pp. 108–118, Springer Nature Switzerland, Cham, 2026, ISBN: 978-3-032-05325-1.
@inproceedings{10.1007/978-3-032-05325-1_11,
title = {Compact Training-Free NAS with Alternating Evolution Game for Medical Image Segmentation},
author = {Xiaoxue Sun and Hongpeng Wang and Pei-Cheng Song},
editor = {James C. Gee and Daniel C. Alexander and Jaesung Hong and Juan Eugenio Iglesias and Carole H. Sudre and Archana Venkataraman and Polina Golland and Jong Hyo Kim and Jinah Park},
url = {https://link.springer.com/chapter/10.1007/978-3-032-05325-1_11},
isbn = {978-3-032-05325-1},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
booktitle = {Medical Image Computing and Computer Assisted Intervention – MICCAI 2025},
pages = {108–118},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Neural Architecture Search (NAS) has shown significant potential in designing deep neural networks for medical image segmentation. However, even emerging training-free NAS frameworks often incur substantial computational costs and lengthy search times. To address the critical challenges of computational efficiency and architecture interpretability, this paper proposes a compact training-free NAS framework based on an Alternating Evolution Game (AEG-cTFNAS). The proposed method alternates the search and contribution evaluation of the encoder and decoder within the UNet architecture via alternating games. It employs a truncated normal distribution for compact encoding, sampling, and updating to minimize computational overhead, while Bayesian inference is utilized to estimate the contribution of each block, adaptively adjusting the search strategy and facilitating process visualization. Experimental results on two benchmark datasets reveal that AEG-cTFNAS outperforms both manually designed architectures and NAS-based algorithms, underscoring its efficacy and potential for medical image segmentation. Code is available at https://github.com/spcity/AEG-cTFNAS.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
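The compact encoding above draws each block choice from a truncated normal distribution whose mean and standard deviation are updated toward promising candidates. A sketch of the sampling step alone, using SciPy's truncnorm (the discretization and parameter names are our assumptions):

from scipy.stats import truncnorm

def sample_choice(mu, sigma, n_options):
    # Standardized truncation bounds so samples stay in [0, n_options - 1].
    a, b = (0 - mu) / sigma, (n_options - 1 - mu) / sigma
    return int(round(truncnorm.rvs(a, b, loc=mu, scale=sigma)))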
Pandala, Madhavi Latha; Periyanayagi, S.
Optimal explainable vision transformer framework for skin cancer diagnosis with neural architecture search feature learning Journal Article
In: Biomedical Signal Processing and Control, vol. 112, pp. 108723, 2026, ISSN: 1746-8094.
@article{PANDALA2026108723,
title = {Optimal explainable vision transformer framework for skin cancer diagnosis with neural architecture search feature learning},
author = {Madhavi Latha Pandala and S. Periyanayagi},
url = {https://www.sciencedirect.com/science/article/pii/S1746809425012340},
doi = {https://doi.org/10.1016/j.bspc.2025.108723},
issn = {1746-8094},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Biomedical Signal Processing and Control},
volume = {112},
pages = {108723},
abstract = {Skin cancer is one of the most prevalent and life-threatening diseases worldwide, making early and accurate detection crucial for improving patient survival rates. Traditional diagnostic methods rely on manual examination by dermatologists, which is subjective and time-consuming. To address these challenges, this research presents an advanced Optimal XAI-based Skin Cancer Classification Network (OXAI-SCC-Net) framework for automated skin cancer detection and classification. The proposed methodology integrates multiple novel techniques to enhance accuracy and robustness. Initially, a Neural Architecture Search-Large Network (NASL-Net) is employed for feature extraction, leveraging automated deep-learning architecture search to optimize feature learning. To tackle class imbalance, a Support Vector Machine-Adopted Synthetic Oversampling Technique (SSOT) is utilized, which improves upon the Synthetic Minority Oversampling Technique (SMOTE) by generating synthetic samples based on Support Vector Machine (SVM) decision boundaries, ensuring a balanced dataset. Further, the Hippopotamus Optimization Algorithm with Explainable Artificial Intelligence (HOA-XAI) is applied for feature selection, reducing computational complexity by selecting the most informative features while minimizing redundant ones. Finally, a Vision Transformer Convolutional Neural Network (VT-CNN) classifier is trained on the optimized dataset to classify skin lesions into different categories. The proposed OXAI-SCC-Net method achieved an accuracy of 99.12%, with precision, recall, and F1-score each at 99.13% on the ISIC-2019 dataset. This indicates highly consistent and reliable performance across all evaluation metrics compared to state-of-the-art approaches.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Xuncheng; Zhang, Weizhan; Yan, Caixia; Wang, Zhiwen; Du, Haipeng
Efficient Real-Time On-Mobile Video Super-Resolution with Automatic Evolutionary Neural Architecture Search Proceedings Article
In: Senn, Walter; Sanguineti, Marcello; Saudargiene, Ausra; Tetko, Igor V.; Villa, Alessandro E. P.; Jirsa, Viktor; Bengio, Yoshua (Ed.): Artificial Neural Networks and Machine Learning – ICANN 2025, pp. 86–97, Springer Nature Switzerland, Cham, 2026, ISBN: 978-3-032-04546-1.
@inproceedings{10.1007/978-3-032-04546-1_8,
title = {Efficient Real-Time On-Mobile Video Super-Resolution with Automatic Evolutionary Neural Architecture Search},
author = {Xuncheng Liu and Weizhan Zhang and Caixia Yan and Zhiwen Wang and Haipeng Du},
editor = {Walter Senn and Marcello Sanguineti and Ausra Saudargiene and Igor V. Tetko and Alessandro E. P. Villa and Viktor Jirsa and Yoshua Bengio},
url = {https://link.springer.com/chapter/10.1007/978-3-032-04546-1_8},
isbn = {978-3-032-04546-1},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
booktitle = {Artificial Neural Networks and Machine Learning – ICANN 2025},
pages = {86–97},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Real-time Video Super-Resolution (VSR) on mobile devices requires balancing sub-33ms latency with quality preservation, a dual challenge that existing NAS methods fail to address due to restrictive search spaces and unproven convergence. We propose an evolutionary NAS framework with three key innovations: (1) an Almost Sure Strong (A.S.S.)-convergent Genetic Algorithm with theoretical guarantees; (2) a bidirectional gene-architecture mapping encoding topological and parametric interdependencies; (3) automated gene-to-model compilation through repair-reduction rules. Our hardware-aware implementation achieves state-of-the-art performance (+111.8% composite score) on the REDS dataset while reducing latency by 2× compared to baseline architectures. The convergence-proven GA demonstrates superior optimization stability over conventional NAS approaches, establishing new Pareto frontiers for mobile VSR deployments.}
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Pengjun; Zhu, Yanqiao; Gao, Jian; Kang, Haidong; Ma, Lianbo
MRT-NAS: Boosting Training-Free NAS via Manifold Regularization Proceedings Article
In: Senn, Walter; Sanguineti, Marcello; Saudargiene, Ausra; Tetko, Igor V.; Villa, Alessandro E. P.; Jirsa, Viktor; Bengio, Yoshua (Ed.): Artificial Neural Networks and Machine Learning – ICANN 2025, pp. 1–12, Springer Nature Switzerland, Cham, 2026, ISBN: 978-3-032-04558-4.
@inproceedings{10.1007/978-3-032-04558-4_1,
title = {MRT-NAS: Boosting Training-Free NAS via Manifold Regularization},
author = {Pengjun Chen and Yanqiao Zhu and Jian Gao and Haidong Kang and Lianbo Ma},
editor = {Walter Senn and Marcello Sanguineti and Ausra Saudargiene and Igor V. Tetko and Alessandro E. P. Villa and Viktor Jirsa and Yoshua Bengio},
url = {https://link.springer.com/chapter/10.1007/978-3-032-04558-4_1},
isbn = {978-3-032-04558-4},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
booktitle = {Artificial Neural Networks and Machine Learning – ICANN 2025},
pages = {1–12},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Training-free Neural Architecture Search (NAS) aims to automatically discover high-performing neural networks using zero-cost proxies, which directly predict a network's performance and avoid the resource-intensive training process. In this paper, we observe that existing zero-cost proxies prefer high-complexity networks (i.e., those with large Param and FLOPs counts) that are challenging to optimize, which decreases the performance of training-free NAS. Although skip connections can alleviate these optimization difficulties and reduce the complexity level, they face significant disadvantages in proxy scoring. Therefore, to address the performance collapse issue, we propose Manifold Regularization for Training-free NAS (MRT-NAS), which improves a proxy's ability to identify skip connection structures by measuring the similarity between the network's input and output manifolds. Notably, MRT-NAS can be used to regularize any zero-cost proxy in a plug-and-play manner. Experimental results across 3 search spaces and 5 real-world tasks validate the effectiveness of MRT-NAS in boosting the performance of all given zero-cost proxies with negligible time cost. Our implementation is available at https://github.com/yoshimatsuu/MRT-NAS.}
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Olouladé, Babatoundé Moctard; Gao, Jianliang; Al-Sabri, Raeed; Chen, Jiamin; Wu, Zhenpeng
Towards Better Graph Anomaly Detection: A Performance-Aware Neural Architecture Search Approach Proceedings Article
In: Senn, Walter; Sanguineti, Marcello; Saudargiene, Ausra; Tetko, Igor V.; Villa, Alessandro E. P.; Jirsa, Viktor; Bengio, Yoshua (Ed.): Artificial Neural Networks and Machine Learning – ICANN 2025, pp. 285–297, Springer Nature Switzerland, Cham, 2026, ISBN: 978-3-032-04558-4.
@inproceedings{10.1007/978-3-032-04558-4_23,
title = {Towards Better Graph Anomaly Detection: A Performance-Aware Neural Architecture Search Approach},
author = {Babatoundé Moctard Olouladé and Jianliang Gao and Raeed Al-Sabri and Jiamin Chen and Zhenpeng Wu},
editor = {Walter Senn and Marcello Sanguineti and Ausra Saudargiene and Igor V. Tetko and Alessandro E. P. Villa and Viktor Jirsa and Yoshua Bengio},
url = {https://link.springer.com/chapter/10.1007/978-3-032-04558-4_23},
isbn = {978-3-032-04558-4},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
booktitle = {Artificial Neural Networks and Machine Learning – ICANN 2025},
pages = {285–297},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Detecting anomalies in graph data is a crucial research area with extensive applications across various domains, including but not limited to fraud detection, cybersecurity, health monitoring, and system failure prediction. Graph neural networks (GNNs) have recently gained popularity for anomaly detection tasks due to their powerful capability to leverage complex relationships and inherent structural patterns within graph-based datasets. However, manual design and fine-tuning of GNN architectures are typically time-consuming processes that require considerable expertise. Furthermore, many existing GNN methods predominantly emphasize spatial graph information while neglecting important spectral features, thereby limiting their effectiveness in clearly distinguishing anomalies during information aggregation. To address these issues, we introduce PAGNAS, a Performance-Aware Graph Neural Architecture Search framework specifically tailored for anomaly detection. PAGNAS integrates spectral-based GNN ranking, improving its ability to identify anomalies within graph structures accurately. Our framework leverages a neural performance ranking predictor that automates the selection and optimization of GNN architectures, significantly reducing manual design efforts. By focusing on the discovery of optimal architectures rather than strictly predicting their exact performance, PAGNAS efficiently navigates the architectural search space. Empirical evaluations across four benchmark datasets confirm the effectiveness of PAGNAS. It consistently achieves competitive or superior AUC scores compared to state-of-the-art baselines. These results highlight the robustness of PAGNAS in synthesizing high-performing GNN architectures for anomaly detection in attributed graphs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Xingyu; Ji, Junzhong; Liu, Gan; Xiao, Yadong
PE-RBNAS: A robust neural architecture search with progressive-enhanced strategies for brain network classification Journal Article
In: Medical Image Analysis, vol. 107, pp. 103813, 2026, ISSN: 1361-8415.
@article{WANG2026103813,
title = {PE-RBNAS: A robust neural architecture search with progressive-enhanced strategies for brain network classification},
author = {Xingyu Wang and Junzhong Ji and Gan Liu and Yadong Xiao},
url = {https://www.sciencedirect.com/science/article/pii/S1361841525003597},
doi = {https://doi.org/10.1016/j.media.2025.103813},
issn = {1361-8415},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Medical Image Analysis},
volume = {107},
pages = {103813},
abstract = {Functional Brain Network (FBN) classification methods based on Neural Architecture Search (NAS) are emerging rapidly, with their core advantage being the ability to automatically construct high-quality network architectures. However, existing methods exhibit poor robustness when dealing with FBNs that have inherent high-noise characteristics. To address this issue, we propose a robust NAS with progressive-enhanced strategies for FBN classification. Specifically, this method adopts Particle Swarm Optimization as the search method, while treating candidate architectures as individuals, and proposes two progressive-enhanced (PE) strategies to optimize the critical stages of population sampling and fitness evaluation. In the population sampling stage, we first utilize Latin Hypercube Sampling to initialize a small-scale population, ensuring a broad search range. Subsequently, to reduce random fluctuations in searches, we propose a PE supplementary sampling strategy that identifies advantageous regions of the solution space, and performs precise supplementary sampling of the population. In the fitness evaluation stage, to enhance the noise resistance of the searched architectures, we propose a PE fitness evaluation strategy. This strategy first evaluates individual fitness separately using both original data and artificially constructed noise-augmented data, then combines the two fitness scores through a novel progressive formula to determine the final individual fitness. Experiments were conducted on two public datasets: the ABIDE I dataset (1,112 subjects, 17 sites) and ADHD-200 (776 subjects, 8 sites), using AAL/CC200 atlases. Results demonstrate that PE-RBNAS achieves state-of-the-art performance, with 72.61% accuracy on clean ABIDE I data (vs. 71.05% for MC-APSONAS) and 71.82% accuracy under 0.2 noise (vs. 68.15% for PSO-BNAS). The results indicate that, compared to other methods, the proposed method demonstrates better model performance and superior noise resistance.}
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xie, Yu; Chang, Yu; Li, Ming; Qin, A. K.; Zhang, Xialei
AutoSGRL: Automated framework construction for self-supervised graph representation learning Journal Article
In: Neural Networks, vol. 194, pp. 108119, 2026, ISSN: 0893-6080.
@article{XIE2026108119,
title = {AutoSGRL: Automated framework construction for self-supervised graph representation learning},
author = {Yu Xie and Yu Chang and Ming Li and A. K. Qin and Xialei Zhang},
url = {https://www.sciencedirect.com/science/article/pii/S0893608025009992},
doi = {https://doi.org/10.1016/j.neunet.2025.108119},
issn = {0893-6080},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Neural Networks},
volume = {194},
pages = {108119},
abstract = {Automated machine learning (AutoML) is a promising solution for building a machine learning framework without human assistance and has attracted significant attention throughout the computational intelligence research community. Although there has been an emerging interest in graph neural architecture search, current research focuses on the specific design of semi-supervised or supervised graph neural networks. Motivated by this, we propose a novel method that enables the automatic construction of flexible self-supervised graph representation learning frameworks for the first time, to the best of our knowledge; we refer to this method as AutoSGRL. Based on existing self-supervised graph contrastive learning methods, AutoSGRL establishes a framework search space for self-supervised graph representation learning, which encompasses data augmentation strategies and proxy tasks for constructing graph contrastive learning frameworks, and the hyperparameters required for model training. Then, we implement an automatic search engine based on genetic algorithms, which constructs multiple self-supervised graph representation learning frameworks as the initial population. By simulating the process of biological evolution including selection, crossover, and mutation, the search engine iteratively evolves the population to identify high-performing frameworks and optimal hyperparameters. Empirical studies demonstrate that our AutoSGRL achieves comparable or even better performance than state-of-the-art manually designed self-supervised graph representation learning methods and semi-supervised graph neural architecture search methods.}
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xie, Weisheng; Gao, Xiangxiang; Fang, Xuwei; Li, Hui; Hang, Chen; Li, Shaoyuan
EQUINAS: Equilibrium-guided differentiable neural architecture search Journal Article
In: Expert Systems with Applications, vol. 298, pp. 129711, 2026, ISSN: 0957-4174.
@article{XIE2026129711,
title = {EQUINAS: Equilibrium-guided differentiable neural architecture search},
author = {Weisheng Xie and Xiangxiang Gao and Xuwei Fang and Hui Li and Chen Hang and Shaoyuan Li},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425033263},
doi = {https://doi.org/10.1016/j.eswa.2025.129711},
issn = {0957-4174},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Expert Systems with Applications},
volume = {298},
pages = {129711},
abstract = {Recent research has significantly mitigated the performance collapse issue in Differentiable Architecture Search (DARTS) by either refining architecture parameters to better reflect the true strengths of operations or developing alternative metrics for evaluating operation significance. However, the actual role and impact of architecture parameters remain insufficiently explored, creating critical ambiguities in the search process. To address this gap, we conduct a rigorous theoretical analysis demonstrating that the change rate of architecture parameters reflects the sensitivity of the supernet’s validation loss in architecture space, thereby influencing the derived architecture’s performance by shaping supernet training dynamics. Building on these insights, we introduce the concept of a Stable Equilibrium State to capture the stability of the bi-level optimization process and propose the Equilibrium Influential (EI) metric to assess operation importance. By integrating these elements, we propose EQUINAS, a differentiable NAS approach that leverages the Stable Equilibrium State to identify the optimal state during the search process and derives the final architecture using the EI metric. Extensive experiments across diverse datasets and search spaces demonstrate that EQUINAS achieves competitive test accuracy compared to state-of-the-art methods while significantly reducing search costs. Additionally, EQUINAS shows remarkable performance in Transformer-based architectures and excels in real-world applications such as image classification and text recognition.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Weiduo; Dong, Xiaoshe; Wang, Qiang
DP-SWAP: Fast Swapping Strategy Based on Dynamic Programming Journal Article
In: Future Generation Computer Systems, vol. 175, pp. 108071, 2026, ISSN: 0167-739X.
@article{CHEN2026108071,
title = {DP-SWAP: Fast Swapping Strategy Based on Dynamic Programming},
author = {Weiduo Chen and Xiaoshe Dong and Qiang Wang},
url = {https://www.sciencedirect.com/science/article/pii/S0167739X25003656},
doi = {https://doi.org/10.1016/j.future.2025.108071},
issn = {0167-739X},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Future Generation Computer Systems},
volume = {175},
pages = {108071},
abstract = {Neural Architecture Search (NAS) has emerged as an effective approach for automating neural network design. However, NAS imposes significant GPU memory pressure due to the need to evaluate numerous candidate models during training. While tensor swapping helps reduce memory usage, existing tensor selection methods rely on extensive iterative searches, which require repeatedly traversing model computation graphs to evaluate the impact of swapping schemes, leading to high time complexity and poor scalability in dynamic NAS scenarios. To address this issue, we propose DP-SWAP, a novel tensor swapping strategy based on dynamic programming. By leveraging the optimal substructure property of the tensor selection problem, DP-SWAP computes effective swapping schemes with only O(n) time complexity, allowing fast and adaptive decision-making during NAS model exploration. Experimental results show that DP-SWAP achieves training performance comparable to state-of-the-art heuristic methods, while reducing swapping decision time by over three orders of magnitude, thus effectively alleviating GPU memory bottlenecks in NAS.}
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Jian; Zhu, Yunlong; Dong, Zhicheng; Yang, Hucheng; Cheng, Xudong; Xue, Zhenyu
A lightweight arc fault detection model integrating multi-objective architecture search with dynamic noise-augmented training Journal Article
In: Measurement, vol. 257, pp. 118649, 2026, ISSN: 0263-2241.
@article{LI2026118649,
title = {A lightweight arc fault detection model integrating multi-objective architecture search with dynamic noise-augmented training},
author = {Jian Li and Yunlong Zhu and Zhicheng Dong and Hucheng Yang and Xudong Cheng and Zhenyu Xue},
url = {https://www.sciencedirect.com/science/article/pii/S0263224125020081},
doi = {https://doi.org/10.1016/j.measurement.2025.118649},
issn = {0263-2241},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Measurement},
volume = {257},
pages = {118649},
abstract = {Ensuring accurate and efficient arc fault detection is critical for the safety and reliability of modern electrical systems, particularly in embedded and resource-constrained environments. This paper presents a lightweight convolutional neural network (CNN) model optimized through a multi-objective genetic algorithm (NSGA-II) to achieve a balance between detection accuracy, computational complexity, and noise robustness. The proposed model integrates Squeeze-and-Excitation (SE) attention mechanisms, depthwise separable convolutions, and dynamic Gaussian noise augmentation during training to enhance generalization under noisy conditions. Neural architecture search is employed to automatically design compact yet high-performing architectures, with the final model achieving an F1-score of 99.49% using only 2529 parameters. The model is validated experimentally on a Raspberry Pi 4B platform, demonstrating an average inference time of 0.785 ms per sample, thereby confirming its real-time detection capability. This study offers a robust, efficient, and practical solution for arc fault diagnosis in embedded industrial applications.}
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yang, Yeming; Liu, Zhihao; Wong, Ka-Chun; Lin, Qiuzhen; Luo, Jianping; Li, Jianqiang
Evolutionary multi-task robust architecture search for network intrusion detection Journal Article
In: Expert Systems with Applications, vol. 296, pp. 128899, 2026, ISSN: 0957-4174.
@article{YANG2026128899,
title = {Evolutionary multi-task robust architecture search for network intrusion detection},
author = {Yeming Yang and Zhihao Liu and Ka-Chun Wong and Qiuzhen Lin and Jianping Luo and Jianqiang Li},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425025163},
doi = {https://doi.org/10.1016/j.eswa.2025.128899},
issn = {0957-4174},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Expert Systems with Applications},
volume = {296},
pages = {128899},
abstract = {Network Intrusion Detection (NID) becomes a key technology for ensuring network security. Recent researchers have proposed various NID systems based on neural networks. However, these networks require expensive expert knowledge for manual design, which is tedious and time-consuming. Moreover, they easily suffer from adversarial attacks, which limits their application in safety-critical scenarios. To alleviate the above problems, this paper proposes an evolutionary multi-task robust architecture search method, called EMR-NID, which can automatically design robust architectures for NID systems. First, we design an architecture transfer update strategy that achieves information sharing and knowledge transfer between different tasks. Then, we develop an architecture performance correction strategy that enhances the efficiency of robust search and strengthens NID’s defense capability. Finally, our EMR-NID method is validated on three well-known NID datasets, i.e., NSL-KDD, UNSW-NB15, and Edge-IIoTset. The experimental results show that EMR-NID can outperform some state-of-the-art NID methods in terms of clean and robust accuracy under multiple scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Poyser, Matt; Breckon, Toby P.
DDS-NAS: Dynamic data selection within neural architecture search via on-line hard example mining applied to image classification Journal Article
In: Pattern Recognition, vol. 169, pp. 111948, 2026, ISSN: 0031-3203.
@article{POYSER2026111948,
title = {DDS-NAS: Dynamic data selection within neural architecture search via on-line hard example mining applied to image classification},
author = {Matt Poyser and Toby P. Breckon},
url = {https://www.sciencedirect.com/science/article/pii/S0031320325006089},
doi = {https://doi.org/10.1016/j.patcog.2025.111948},
issn = {0031-3203},
year = {2026},
date = {2026-01-01},
urldate = {2026-01-01},
journal = {Pattern Recognition},
volume = {169},
pages = {111948},
abstract = {In order to address the scalability challenge within Neural Architecture Search (NAS), we speed up NAS training via dynamic hard example mining within a curriculum learning framework. By utilising an autoencoder that enforces an image similarity embedding in latent space, we construct an efficient kd-tree structure to order images by furthest neighbour dissimilarity in a low-dimensional embedding. Given a query image from our subsample dataset, we can identify the most dissimilar image within the global dataset in logarithmic time. Via curriculum learning, we then dynamically re-formulate an unbiased subsample dataset for NAS optimisation, upon which the current NAS solution architecture performs poorly. We show that our DDS-NAS framework speeds up gradient-based NAS strategies by up to 27× without loss in performance. By maximising the contribution of each image sample during training, we reduce the duration of a NAS training cycle and the number of iterations required for convergence.}
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2025
Wang, Weiqi; Bao, Feilong; Xing, Zhecong; Lian, Zhe
A Survey: Research Progress of Feature Fusion Technology Journal Article
In: 2025.
@article{wangsurvey,
title = {A Survey: Research Progress of Feature Fusion Technology},
author = {Weiqi Wang and Feilong Bao and Zhecong Xing and Zhe Lian},
url = {http://poster-openaccess.com/files/ICIC2024/862.pdf},
year = {2025},
date = {2025-12-01},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
Machine-Generated Neural Networks for Short-Term Load Forecasting Collection
2025.
@collection{nokey,
title = {Machine-Generated Neural Networks for Short-Term Load Forecasting},
author = {Gergana Vacheva and Plamen Stanchev and Nikolay Hinov},
url = {https://unitechsp.tugab.bg/images/2024/1-EE/s1_p143_v1.pdf},
year = {2025},
date = {2025-12-01},
urldate = {2025-12-01},
booktitle = {International Scientific Conference UNITECH'2024},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Avval, Sasan Salmani Pour; Abadie, Titouan; Yaghoubi, Vahid
Automated neural network generation for industrial datasets, application to laser powder bed fusion Journal Article
In: Journal of Intelligent Manufacturing, 2025.
@article{nokey,
title = {Automated neural network generation for industrial datasets, application to laser powder bed fusion},
author = {Sasan Salmani Pour Avval and Titouan Abadie and Vahid Yaghoubi},
url = {https://link.springer.com/article/10.1007/s10845-025-02707-0},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
journal = {Journal of Intelligent Manufacturing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
Interpreting NAS-Optimized Transformer Models for Remaining Useful Life Prediction Using Gradient Explainer Collection
2025.
@collection{nokey,
title = {Interpreting NAS-Optimized Transformer Models for Remaining Useful Life Prediction Using Gradient Explainer},
author = {Messaouda Nekkaa and Mohamed Abdouni and Dalila Boughaci},
url = {https://annals-csis.org/Volume_44/drp/pdf/8176.pdf},
year = {2025},
date = {2025-11-01},
booktitle = {20th Conference on Computer Science and Intelligence Systems},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Wang, Yu
Architecture Optimization and Data-Efficient Methods in Machine Learning PhD Thesis
2025.
@phdthesis{nokey,
title = {Architecture Optimization and Data-Efficient Methods in Machine Learning},
author = {Yu Wang},
url = {https://escholarship.org/content/qt4rh0v7fg/qt4rh0v7fg.pdf},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Balmain, Nate; Jiao, Lei; Li, Jun
Efficient IoT Anomaly Detection via Graph-Based Neural Architecture Search Miscellaneous
2025.
@misc{nokey,
title = {Efficient IoT Anomaly Detection via Graph-Based Neural Architecture Search},
author = {Nate Balmain and Lei Jiao and Jun Li},
url = {https://ljiao.github.io/papers/vcc25.pdf},
year = {2025},
date = {2025-11-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Liu, Ziti; Liu, Yang; Yan, Xunshi; Liu, Wen; Nie, Han; Guo, Shuaiqi; Zhang, Chen-an
Automatic network structure discovery of physics informed neural networks via knowledge distillation Journal Article
In: Nature Communications, 2025.
@article{nokey,
title = {Automatic network structure discovery of physics informed neural networks via knowledge distillation},
author = {Ziti Liu and Yang Liu and Xunshi Yan and Wen Liu and Han Nie and Shuaiqi Guo and Chen-an Zhang},
url = {https://www.nature.com/articles/s41467-025-64624-3},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
journal = {Nature Communications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Wenbin; Li, Juanjuan; Zeng, Yiliang; Meng, Na; Zhao, Jiahong
A dual-channel hyperspectral classification method based on NAS and transformer Journal Article
In: Scientific Reports, 2025.
@article{liu-sr25a,
title = {A dual-channel hyperspectral classification method based on NAS and transformer},
author = {Wenbin Liu and Juanjuan Li and Yiliang Zeng and Na Meng and Jiahong Zhao},
url = {https://www.nature.com/articles/s41598-025-21399-3},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
journal = {Scientific Reports},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Huang, Kai; Mao, Yingchi; Zhang, Benteng; Chen, Yihan; Chen, Yuchu; Wu, Jie
Efficient Zero-Cost Neural Architecture Search for Personalized AI Systems in Cloud-Edge Networks Miscellaneous
2025.
@misc{Huanga-25a,
title = {Efficient Zero-Cost Neural Architecture Search for Personalized AI Systems in Cloud-Edge Networks},
author = {Kai Huang and Yingchi Mao and Benteng Zhang and Yihan Chen and Yuchu Chen and Jie Wu},
url = {https://cis.temple.edu/~jiewu/research/publications/Publication_files/MASS2025_Efficient%20Zero-Cost%20Neural%20Architecture%20Search%20for%20Personalized%20AI%20Systems%20in%20Cl....pdf},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Muthusamy, Sudhakar; Ramu, Swarna Priya
A neural architecture search optimized lightweight attention ensemble model for nutrient deficiency and severity assessment in diverse crop leaves Journal Article
In: Scientific Reports, 2025.
@article{Muthusamy-sr25a,
title = {A neural architecture search optimized lightweight attention ensemble model for nutrient deficiency and severity assessment in diverse crop leaves},
author = {Sudhakar Muthusamy and Swarna Priya Ramu},
url = {https://www.nature.com/articles/s41598-025-20124-4},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
journal = {Scientific Reports},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fu, Wenjie; Cui, Wei; Zhang, Shuai; Li, Zhipeng; Chen, Lei; Li, Peng
Evolutionary Graph Neural Architecture Search with Mask Predictor for Power Distribution Network Fault Detection Journal Article
In: Intelligent Computing, vol. 0, no. ja, 2025.
@article{doi:10.34133/icomputing.0229,
title = {Evolutionary Graph Neural Architecture Search with Mask Predictor for Power Distribution Network Fault Detection},
author = {Wenjie Fu and Wei Cui and Shuai Zhang and Zhipeng Li and Lei Chen and Peng Li},
url = {https://spj.science.org/doi/abs/10.34133/icomputing.0229},
doi = {10.34133/icomputing.0229},
year = {2025},
date = {2025-11-01},
journal = {Intelligent Computing},
volume = {0},
number = {ja},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
Transparent Vision: A Theory of Hierarchical Invariant Representations Collection
2025.
@collection{nokey,
title = {Transparent Vision: A Theory of Hierarchical Invariant Representations},
author = {Shuren Qi and Yushu Zhang and Chao Wang and Zhihua Xia and Xiaochun Cao and Fenglei Fan},
url = {https://openaccess.thecvf.com/content/ICCV2025/papers/Qi_Transparent_Vision_A_Theory_of_Hierarchical_Invariant_Representations_ICCV_2025_paper.pdf},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
booktitle = {ICCV 2025},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
(Ed.)
Beyond the Limits: Overcoming Negative Correlation of Activation-Based Training-Free NAS Collection
2025.
@collection{nokey,
title = {Beyond the Limits: Overcoming Negative Correlation of Activation-Based Training-Free NAS},
author = {Haidong Kang and Lianbo Ma and Pengjun Chen and Guo Yu and Xingwei Wang and Min Huang},
url = {https://openaccess.thecvf.com/content/ICCV2025/papers/Kang_Beyond_the_Limits_Overcoming_Negative_Correlation_of_Activation-Based_Training-Free_NAS_ICCV_2025_paper.pdf},
year = {2025},
date = {2025-11-01},
booktitle = {ICCV 2025},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
(Ed.)
Neural Architecture Search Driven by Locally Guided Diffusion for Personalized Federated Learning Collection
2025.
@collection{Liao-iccv25a,
title = {Neural Architecture Search Driven by Locally Guided Diffusion for Personalized Federated Learning},
author = {Peng Liao and Xilu Wang and Yaochu Jin and Wenli Du and Han Hu},
url = {https://openaccess.thecvf.com/content/ICCV2025/papers/Liao_Neural_Architecture_Search_Driven_by_Locally_Guided_Diffusion_for_Personalized_ICCV_2025_paper.pdf},
year = {2025},
date = {2025-11-01},
urldate = {2025-11-01},
booktitle = {ICCV 2025},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
(Ed.)
TRNAS: A Training-Free Robust Neural Architecture Search Collection
2025.
@collection{Yang-iccv25a,
title = {TRNAS: A Training-Free Robust Neural Architecture Search},
author = {Yeming Yang and Qingling Zhu and Jianping Luo and Ka-Chun Wong and Qiuzhen Lin and Jianqiang Li},
url = {https://openaccess.thecvf.com/content/ICCV2025/papers/Yang_TRNAS_A_Training-Free_Robust_Neural_Architecture_Search_ICCV_2025_paper.pdf},
year = {2025},
date = {2025-11-01},
booktitle = {ICCV 2025},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Ankalaki, Shilpa
Automating wastewater characteristic parameter quantitation using neural architecture search in AutoML systems on spectral reflectance data Journal Article
In: Scientific Reports, 2025.
@article{Ankalaki-sr25a,
title = {Automating wastewater characteristic parameter quantitation using neural architecture search in AutoML systems on spectral reflectance data},
author = {Shilpa Ankalaki},
url = {https://www.nature.com/articles/s41598-025-21069-4},
year = {2025},
date = {2025-10-02},
urldate = {2025-10-02},
journal = {Scientific Reports},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ahmad, Afzal
Searching for Efficiency: Automated Search for Hardware-Aware Neural Networks and High-Performance FPGA Accelerators PhD Thesis
2025.
@phdthesis{Ahmad-phd25a,
title = {Searching for Efficiency: Automated Search for Hardware-Aware Neural Networks and High-Performance FPGA Accelerators},
author = {Afzal Ahmad},
url = {https://www.proquest.com/docview/3261504506?pq-origsite=gscholar&fromopenview=true&sourcetype=Dissertations%20&%20Theses},
year = {2025},
date = {2025-10-02},
urldate = {2025-10-02},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
