Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2023
Dong, Dong; Jiang, Hongxu; Wei, Xuekai; Song, Yanfei; Zhuang, Xu; Wang, Jason
ETNAS: An energy consumption task-driven neural architecture search Journal Article
In: Sustainable Computing: Informatics and Systems, vol. 40, pp. 100926, 2023, ISSN: 2210-5379.
@article{DONG2023100926,
title = {ETNAS: An energy consumption task-driven neural architecture search},
author = {Dong Dong and Hongxu Jiang and Xuekai Wei and Yanfei Song and Xu Zhuang and Jason Wang},
url = {https://www.sciencedirect.com/science/article/pii/S2210537923000811},
doi = {10.1016/j.suscom.2023.100926},
issn = {2210-5379},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Sustainable Computing: Informatics and Systems},
volume = {40},
pages = {100926},
abstract = {Neural Architecture Search (NAS) is crucial in the field of sustainable computing as it facilitates the development of highly efficient and effective neural networks. However, it cannot automate the deployment of neural networks to accommodate specific hardware resources and task requirements. This paper introduces ETNAS, which is a hardware-aware multi-objective optimal neural network architecture search algorithm based on the differentiable neural network architecture search method (DARTS). The algorithm searches for a lower-power neural network architecture with guaranteed inference accuracy by modifying the loss function of the differentiable neural network architecture search. We modify the dense network in DARTS to simultaneously search for networks with a lower memory footprint, enabling them to run on memory-constrained edge-end devices. We collected data on the power consumption and time consumption of numerous common operators on FPGA and Domain-Specific Architectures (DSA). The experimental results demonstrate that ETNAS achieves comparable accuracy performance and time efficiency while consuming less power compared to state-of-the-art algorithms, thereby validating its effectiveness in practical applications and contributing to the reduction of carbon emissions in intelligent cyber–physical systems (ICPS) edge computing inference.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tang, Siao; Wang, Xin; Chen, Hong; Guan, Chaoyu; Tang, Yansong; Zhu, Wenwu
Lightweight Diffusion Models with Distillation-Based Block Neural Architecture Search Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-04950,
title = {Lightweight Diffusion Models with Distillation-Based Block Neural Architecture Search},
author = {Siao Tang and Xin Wang and Hong Chen and Chaoyu Guan and Yansong Tang and Wenwu Zhu},
url = {https://doi.org/10.48550/arXiv.2311.04950},
doi = {10.48550/ARXIV.2311.04950},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.04950},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Yuke; Baik, Jiwon; Rahman, Md Marufi; Anagnostopoulos, Iraklis; Li, Ruopu; Shu, Tong
Pareto Optimization of CNN Models via Hardware-Aware Neural Architecture Search for Drainage Crossing Classification on Resource-Limited Devices Proceedings Article
In: Proceedings of the SC '23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, pp. 1767–1775, Association for Computing Machinery, Denver, CO, USA, 2023, ISBN: 9798400707858.
@inproceedings{10.1145/3624062.3624258,
title = {Pareto Optimization of CNN Models via Hardware-Aware Neural Architecture Search for Drainage Crossing Classification on Resource-Limited Devices},
author = {Yuke Li and Jiwon Baik and Md Marufi Rahman and Iraklis Anagnostopoulos and Ruopu Li and Tong Shu},
url = {https://doi.org/10.1145/3624062.3624258},
doi = {10.1145/3624062.3624258},
isbn = {9798400707858},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Proceedings of the SC '23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis},
pages = {1767--1775},
publisher = {Association for Computing Machinery},
address = {Denver, CO, USA},
series = {SC-W '23},
abstract = {Embedded devices, constrained by limited memory and processors, require deep learning models to be tailored to their specifications. This research explores customized model architectures for classifying drainage crossing images. Building on the foundational ResNet-18, this paper aims to maximize prediction accuracy, reduce memory size, and minimize inference latency. Various configurations were systematically probed by leveraging hardware-aware neural architecture search, accumulating 1,717 experimental results over six benchmarking variants. The experimental data analysis, enhanced by nn-Meter, provided a comprehensive understanding of inference latency across four different predictors. Significantly, a Pareto front analysis with three objectives of accuracy, latency, and memory resulted in five non-dominated solutions. These standout models showcased efficiency while retaining accuracy, offering a compelling alternative to the conventional ResNet-18 when deployed in resource-constrained environments. The paper concludes by highlighting insights drawn from the results and suggesting avenues for future exploration.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hsieh, Jun-Wei; Chou, Cheng-Han; Chang, Ming-Ching; Chen, Ping-Yang; Santra, Santanu; Huang, Chih-Sheng
Mean-Shift Based Differentiable Architecture Search Journal Article
In: IEEE Transactions on Artificial Intelligence, pp. 1-11, 2023.
@article{10310657,
title = {Mean-Shift Based Differentiable Architecture Search},
author = {Jun-Wei Hsieh and Cheng-Han Chou and Ming-Ching Chang and Ping-Yang Chen and Santanu Santra and Chih-Sheng Huang},
url = {https://ieeexplore.ieee.org/abstract/document/10310657},
doi = {10.1109/TAI.2023.3329792},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Artificial Intelligence},
pages = {1--11},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chang, Chi-Chih; Sung, Yuan-Yao; Yu, Shixing; Huang, Ning-Chi; Marculescu, Diana; Wu, Kai-Chiang
FLORA: Fine-grained Low-Rank Architecture Search for Vision Transformer Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-03912,
title = {FLORA: Fine-grained Low-Rank Architecture Search for Vision Transformer},
author = {Chi-Chih Chang and Yuan-Yao Sung and Shixing Yu and Ning-Chi Huang and Diana Marculescu and Kai-Chiang Wu},
url = {https://doi.org/10.48550/arXiv.2311.03912},
doi = {10.48550/ARXIV.2311.03912},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.03912},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chang, Chen-Chia; Pan, Jingyu; Xie, Zhiyao; Zhang, Tunhou; Hu, Jiang; Chen, Yiran
Towards Fully Automated Machine Learning for Routability Estimator Development Journal Article
In: IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, pp. 1-1, 2023.
@article{10310247,
title = {Towards Fully Automated Machine Learning for Routability Estimator Development},
author = {Chen-Chia Chang and Jingyu Pan and Zhiyao Xie and Tunhou Zhang and Jiang Hu and Yiran Chen},
url = {https://ieeexplore.ieee.org/abstract/document/10310247},
doi = {10.1109/TCAD.2023.3330818},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
An, Taegun; Joo, Changhee
CycleGANAS: Differentiable Neural Architecture Search for CycleGAN Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-07162,
title = {CycleGANAS: Differentiable Neural Architecture Search for CycleGAN},
author = {Taegun An and Changhee Joo},
url = {https://doi.org/10.48550/arXiv.2311.07162},
doi = {10.48550/ARXIV.2311.07162},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.07162},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ji, Junzhong; Wang, Xingyu
Convolutional architecture search based on particle swarm algorithm for functional brain network classification Journal Article
In: Applied Soft Computing, vol. 149, pp. 111049, 2023, ISSN: 1568-4946.
@article{JI2023111049,
title = {Convolutional architecture search based on particle swarm algorithm for functional brain network classification},
author = {Junzhong Ji and Xingyu Wang},
url = {https://www.sciencedirect.com/science/article/pii/S1568494623010670},
doi = {10.1016/j.asoc.2023.111049},
issn = {1568-4946},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Applied Soft Computing},
volume = {149},
pages = {111049},
abstract = {The functional brain network (FBN) classification based on convolutional neural networks (CNN) is of great significance for discovery and diagnosis of brain diseases, and has attracted increasing attention. However, all the CNN architectures of current studies mainly depend on hand-crafted, which are labor-intensive and unreliable. To solve it, we propose a neural architecture search (NAS) method based on particle swarm optimization, to automatically design the CNN architecture for FBN classification. Specifically, this method includes three phases, namely the individual expression phase, the individual evaluation phase, and the individual update phase. In the first phase, we treat the neural architecture as the individual in particle swarm. The individual vector consists of six elements, and the value of each element represents the number of a special convolution operation. The six special convolution operations can effectively extract brain network multilevel topological features. In the second phase, we propose a novel surrogate-assisted predictor to evaluate the fitness of the individuals more efficiently. In the last phase, we apply the predicted fitness to acquire the historical optimum of each individual and the global optimum of the population, and use them to update all individuals in the particle swarm. The second and third phases are repeatedly performed until the end condition is met. Experiments on benchmark datasets demonstrate that the CNN architecture searched by our method achieves better classification performance than state-of-the-art hand-crafted CNN architectures.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wen, Wei; Liu, Kuang-Hung; Fedorov, Igor; Zhang, Xin; Yin, Hang; Chu, Weiwei; Hassani, Kaveh; Sun, Mengying; Liu, Jiang; Wang, Xu; Jiang, Lin; Chen, Yuxin; Zhang, Buyun; Liu, Xi; Cheng, Dehua; Chen, Zhengxing; Zhao, Guang; Han, Fangqiu; Yang, Jiyan; Hao, Yuchen; Xiong, Liang; Chen, Wen-Yen
Rankitect: Ranking Architecture Search Battling World-class Engineers at Meta Scale Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-08430,
title = {Rankitect: Ranking Architecture Search Battling World-class Engineers at Meta Scale},
author = {Wei Wen and Kuang-Hung Liu and Igor Fedorov and Xin Zhang and Hang Yin and Weiwei Chu and Kaveh Hassani and Mengying Sun and Jiang Liu and Xu Wang and Lin Jiang and Yuxin Chen and Buyun Zhang and Xi Liu and Dehua Cheng and Zhengxing Chen and Guang Zhao and Fangqiu Han and Jiyan Yang and Yuchen Hao and Liang Xiong and Wen-Yen Chen},
url = {https://doi.org/10.48550/arXiv.2311.08430},
doi = {10.48550/ARXIV.2311.08430},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.08430},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Zhenrong; Li, Bin; Li, Weifeng; Niu, Shuanlong; Miao, Wang; Niu, Tongzhi
NAS-ASDet: An Adaptive Design Method for Surface Defect Detection Network using Neural Architecture Search Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-10952,
title = {NAS-ASDet: An Adaptive Design Method for Surface Defect Detection Network using Neural Architecture Search},
author = {Zhenrong Wang and Bin Li and Weifeng Li and Shuanlong Niu and Wang Miao and Tongzhi Niu},
url = {https://doi.org/10.48550/arXiv.2311.10952},
doi = {10.48550/ARXIV.2311.10952},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.10952},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hu, Yiming; Chu, Xiangxiang; Zhang, Bo
Masked Autoencoders Are Robust Neural Architecture Search Learners Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-12086,
title = {Masked Autoencoders Are Robust Neural Architecture Search Learners},
author = {Yiming Hu and Xiangxiang Chu and Bo Zhang},
url = {https://doi.org/10.48550/arXiv.2311.12086},
doi = {10.48550/ARXIV.2311.12086},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.12086},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Han, Xiaoyu; Li, Chenyu; Wang, Zifan; Liu, Guohua
NDARTS: A Differentiable Architecture Search Based on the Neumann Series Journal Article
In: Algorithms, vol. 16, no. 12, 2023, ISSN: 1999-4893.
@article{a16120536,
title = {NDARTS: A Differentiable Architecture Search Based on the Neumann Series},
author = {Xiaoyu Han and Chenyu Li and Zifan Wang and Guohua Liu},
url = {https://www.mdpi.com/1999-4893/16/12/536},
doi = {10.3390/a16120536},
issn = {1999-4893},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Algorithms},
volume = {16},
number = {12},
pages = {536},
abstract = {Neural architecture search (NAS) has shown great potential in discovering powerful and flexible network models, becoming an important branch of automatic machine learning (AutoML). Although search methods based on reinforcement learning and evolutionary algorithms can find high-performance architectures, these search methods typically require hundreds of GPU days. Unlike searching in a discrete search space based on reinforcement learning and evolutionary algorithms, the differentiable neural architecture search (DARTS) continuously relaxes the search space, allowing for optimization using gradient-based methods. Based on DARTS, we propose NDARTS in this article. The new algorithm uses the Implicit Function Theorem and the Neumann series to approximate the hyper-gradient, which obtains better results than DARTS. In the simulation experiment, an ablation experiment was carried out to study the influence of the different parameters on the NDARTS algorithm and to determine the optimal weight, then the best performance of the NDARTS algorithm was searched for in the DARTS search space and the NAS-BENCH-201 search space. Compared with other NAS algorithms, the results showed that NDARTS achieved excellent results on the CIFAR-10, CIFAR-100, and ImageNet datasets, and was an effective neural architecture search algorithm.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Xiangyi; Guo, Jiajia; Wen, Chao-Kai; Jin, Shi
Auto-CsiNet: Scenario-customized Automatic Neural Network Architecture Generation for Massive MIMO CSI Feedback Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-15950,
title = {Auto-CsiNet: Scenario-customized Automatic Neural Network Architecture Generation for Massive MIMO CSI Feedback},
author = {Xiangyi Li and Jiajia Guo and Chao-Kai Wen and Shi Jin},
url = {https://doi.org/10.48550/arXiv.2311.15950},
doi = {10.48550/ARXIV.2311.15950},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.15950},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hafiz, Faizal M. F.; Broekaert, Jan; Swain, Akshya
Evolution of Neural Architectures for Financial Forecasting: A Note on Data Incompatibility during Crisis Periods Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-14604,
title = {Evolution of Neural Architectures for Financial Forecasting: A Note on Data Incompatibility during Crisis Periods},
author = {Faizal M. F. Hafiz and Jan Broekaert and Akshya Swain},
url = {https://doi.org/10.48550/arXiv.2311.14604},
doi = {10.48550/ARXIV.2311.14604},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.14604},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wei, Zimian; Pan, Hengyue; Li, Lujun; Dong, Peijie; Tian, Zhiliang; Niu, Xin; Li, Dongsheng
TVT: Training-Free Vision Transformer Search on Tiny Datasets Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-14337,
title = {TVT: Training-Free Vision Transformer Search on Tiny Datasets},
author = {Zimian Wei and Hengyue Pan and Lujun Li and Peijie Dong and Zhiliang Tian and Xin Niu and Dongsheng Li},
url = {https://doi.org/10.48550/arXiv.2311.14337},
doi = {10.48550/ARXIV.2311.14337},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.14337},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Herberg, Evelyn; Herzog, Roland; Köhne, Frederik; Kreis, Leonie; Schiela, Anton
Sensitivity-Based Layer Insertion for Residual and Feedforward Neural Networks Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-15995,
title = {Sensitivity-Based Layer Insertion for Residual and Feedforward Neural Networks},
author = {Evelyn Herberg and Roland Herzog and Frederik Köhne and Leonie Kreis and Anton Schiela},
url = {https://doi.org/10.48550/arXiv.2311.15995},
doi = {10.48550/ARXIV.2311.15995},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.15995},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Haq, Ijaz Ul; Lee, Byung Suk
TransNAS-TSAD: Harnessing Transformers for Multi-Objective Neural Architecture Search in Time Series Anomaly Detection Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-18061,
title = {TransNAS-TSAD: Harnessing Transformers for Multi-Objective Neural Architecture Search in Time Series Anomaly Detection},
author = {Ijaz Ul Haq and Byung Suk Lee},
url = {https://doi.org/10.48550/arXiv.2311.18061},
doi = {10.48550/ARXIV.2311.18061},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.18061},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Loya, Hrushikesh; Dudziak, Lukasz; Mehrotra, Abhinav; Lee, Royson; Fernández-Marqués, Javier; Lane, Nicholas D.; Wen, Hongkai
How Much Is Hidden in the NAS Benchmarks? Few-Shot Adaptation of a NAS Predictor Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2311-18451,
title = {How Much Is Hidden in the NAS Benchmarks? Few-Shot Adaptation of a NAS Predictor},
author = {Hrushikesh Loya and Lukasz Dudziak and Abhinav Mehrotra and Royson Lee and Javier Fernández-Marqués and Nicholas D. Lane and Hongkai Wen},
url = {https://doi.org/10.48550/arXiv.2311.18451},
doi = {10.48550/ARXIV.2311.18451},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2311.18451},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Benmeziane, Hadjer; Ouarnoughi, Hamza; Niar, Smail; Maghraoui, Kaoutar El
Pareto Rank-Preserving Supernetwork for HW-NAS Technical Report
2023.
@techreport{benmeziane2023pareto,
title = {Pareto Rank-Preserving Supernetwork for HW-NAS},
author = {Hadjer Benmeziane and Hamza Ouarnoughi and Smaïl Niar and Kaoutar El Maghraoui},
url = {https://www.researchgate.net/publication/374319863_Pareto_Rank-Preserving_Supernetwork_for_Hardware-Aware_Neural_Architecture_Search},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Mohammed, Mubarek
Model Compression Beyond Size Reduction Proceedings Article
In: Conference on Parsimony and Learning (Recent Spotlight Track), 2023.
@inproceedings{mohammed2023model,
title = {Model Compression Beyond Size Reduction},
author = {Mubarek Mohammed},
url = {https://openreview.net/forum?id=HO0RdLgQtW},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Conference on Parsimony and Learning (Recent Spotlight Track)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wan, Qiyu; Wang, Lening; Wang, Jing; Song, Shuaiwen Leon; Fu, Xin
NAS-SE: Designing A Highly-Efficient In-Situ Neural Architecture Search Engine for Large-Scale Deployment Proceedings Article
In: Proceedings of the 56th Annual IEEE/ACM International Symposium on Microarchitecture, pp. 756–768, Association for Computing Machinery, Toronto, ON, Canada, 2023, ISBN: 9798400703294.
@inproceedings{10.1145/3613424.3614265,
title = {NAS-SE: Designing A Highly-Efficient In-Situ Neural Architecture Search Engine for Large-Scale Deployment},
author = {Qiyu Wan and Lening Wang and Jing Wang and Shuaiwen Leon Song and Xin Fu},
url = {https://doi.org/10.1145/3613424.3614265},
doi = {10.1145/3613424.3614265},
isbn = {9798400703294},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Proceedings of the 56th Annual IEEE/ACM International Symposium on Microarchitecture},
pages = {756--768},
publisher = {Association for Computing Machinery},
address = {Toronto, ON, Canada},
series = {MICRO '23},
abstract = {The emergence of Neural Architecture Search (NAS) enables an automated neural network development process that potentially replaces manually-enabled machine learning expertise. A state-of-the-art NAS method, namely One-Shot NAS, has been proposed to drastically reduce the lengthy search time for a wide spectrum of conventional NAS methods. Nevertheless, the search cost is still prohibitively expensive for practical large-scale deployment with real-world applications. In this paper, we reveal that the fundamental cause for inefficient deployment of One-Shot NAS in both single-device and large-scale scenarios originates from the massive redundant off-chip weight access during the numerous DNN inference in sequential searching. Inspired by its algorithmic characteristics, we depart from the traditional CMOS-based architecture designs and propose a promising processing-in-memory design alternative to perform in-situ architecture search, which helps fundamentally address the redundancy issue. Moreover, we further discovered two major performance challenges of directly porting the searching process onto the existing PIM-based accelerators: severe pipeline contention and resource under-utilization. By leveraging these insights, we propose the first highly-efficient in-situ One-Shot NAS search engine design, named NAS-SE, for both single-device and large-scale deployment scenarios. NAS-SE is equipped with a two-phased network diversification strategy for eliminating resource contention, and a novel hardware mapping scheme for boosting the resource utilization by an order of magnitude. Our extensive evaluation demonstrates that NAS-SE significantly outperforms the state-of-the-art digital-based customized NAS accelerator (NASA) with an average speedup of 8.8 × and energy-efficiency improvement of 2.05 ×.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghebriout, Mohamed Imed Eddine; Bouzidi, Halima; Niar, Smaïl; Ouarnoughi, Hamza
Harmonic-NAS: Hardware-Aware Multimodal Neural Architecture Search on Resource-constrained Devices Technical Report
2023.
@techreport{DBLP:journals/corr/abs-2309-06612b,
title = {Harmonic-NAS: Hardware-Aware Multimodal Neural Architecture Search on Resource-constrained Devices},
author = {Mohamed Imed Eddine Ghebriout and Halima Bouzidi and Smaïl Niar and Hamza Ouarnoughi},
url = {https://doi.org/10.48550/arXiv.2309.06612},
doi = {10.48550/ARXIV.2309.06612},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {CoRR},
volume = {abs/2309.06612},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
McDermott, Luke; Weitz, Jason; Demler, Dmitri; Cummings, Daniel; Tran, Nhan; Duarte, Javier
Neural Architecture Codesign for Fast Bragg Peak Analysis Technical Report
2023.
@techreport{mcdermott2023neural,
title = {Neural Architecture Codesign for Fast Bragg Peak Analysis},
author = {Luke McDermott and Jason Weitz and Dmitri Demler and Daniel Cummings and Nhan Tran and Javier Duarte},
url = {https://arxiv.org/abs/2312.05978},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lee, Jaeseong; Hwang
Multilingual Lottery Tickets to Pretrain Language Models Proceedings Article
In: The 2023 Conference on Empirical Methods in Natural Language Processing, 2023.
@inproceedings{lee2023multilingual,
title = {Multilingual Lottery Tickets to Pretrain Language Models},
author = {Jaeseong Lee and hwang},
internal-note = {second author name incomplete ("hwang") -- verify full name against the OpenReview record},
url = {https://openreview.net/forum?id=QG4BWnsX6m},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {The 2023 Conference on Empirical Methods in Natural Language Processing},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Arabi, Pau Mulet; Flowers, Alec; Mauch, Lukas; Cardinaux, Fabien
DBsurf: A Discrepancy Based Method for Discrete Stochastic Gradient Estimation Technical Report
2023.
@techreport{arabi2023dbsurf,
title = {DBsurf: A Discrepancy Based Method for Discrete Stochastic Gradient Estimation},
author = {Pau Mulet Arabi and Alec Flowers and Lukas Mauch and Fabien Cardinaux},
url = {https://arxiv.org/abs/2309.03974},
institution = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Qin, Huafeng; Fan, Chao; Deng, Shaojiang; Li, Yantao; El-Yacoubi, Mounim A.; Zhou, Gang
AG-NAS: An Attention GRU-based Neural Architecture Search for Finger-Vein Recognition Journal Article
In: IEEE Transactions on Information Forensics and Security, pp. 1-1, 2023.
@article{10348535,
title = {AG-NAS: An Attention GRU-based Neural Architecture Search for Finger-Vein Recognition},
author = {Huafeng Qin and Chao Fan and Shaojiang Deng and Yantao Li and Mounim A. El-Yacoubi and Gang Zhou},
url = {https://ieeexplore.ieee.org/abstract/document/10348535},
doi = {10.1109/TIFS.2023.3340915},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Information Forensics and Security},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
ZiWen, Dou; Dong, Ye
Multi-objective Neural Architecture Search for Efficient and Fast Semantic Segmentation on Edge Journal Article
In: IEEE Transactions on Intelligent Vehicles, pp. 1-12, 2023.
@article{10316624,
title = {Multi-objective Neural Architecture Search for Efficient and Fast Semantic Segmentation on Edge},
author = {Dou ZiWen and Ye Dong},
url = {https://ieeexplore.ieee.org/abstract/document/10316624},
doi = {10.1109/TIV.2023.3332594},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Intelligent Vehicles},
pages = {1--12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yang, Changdi; Sheng, Yi; Dong, Peiyan; Kong, Zhenglun; Li, Yanyu; Yu, Pinrui; Yang, Lei; Lin, Xue; Wang, Yanzhi
Fast and Fair Medical AI on the Edge Through Neural Architecture Search for Hybrid Vision Models Proceedings Article
In: 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD), pp. 01-09, 2023.
@inproceedings{10323652,
title = {Fast and Fair Medical AI on the Edge Through Neural Architecture Search for Hybrid Vision Models},
author = {Changdi Yang and Yi Sheng and Peiyan Dong and Zhenglun Kong and Yanyu Li and Pinrui Yu and Lei Yang and Xue Lin and Yanzhi Wang},
doi = {10.1109/ICCAD57390.2023.10323652},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD)},
pages = {1--9},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Poliakov, Egor; Hung, Wei-Jie; Huang, Ching-Chun
Efficient Constraint-Aware Neural Architecture Search for Object Detection Proceedings Article
In: 2023 Asia Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC), pp. 733-740, 2023.
@inproceedings{10317340,
title = {Efficient Constraint-Aware Neural Architecture Search for Object Detection},
author = {Egor Poliakov and Wei-Jie Hung and Ching-Chun Huang},
doi = {10.1109/APSIPAASC58517.2023.10317340},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 Asia Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)},
pages = {733--740},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Lian; Wang, Ying; Zhao, Xiandong; Chen, Weiwei; Li, Huawei; Li, Xiaowei; Han, Yinhe
An Automatic Neural Network Architecture-and-Quantization Joint Optimization Framework for Efficient Model Inference Journal Article
In: IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, pp. 1-1, 2023.
@article{10342823,
title = {An Automatic Neural Network Architecture-and-Quantization Joint Optimization Framework for Efficient Model Inference},
author = {Lian Liu and Ying Wang and Xiandong Zhao and Weiwei Chen and Huawei Li and Xiaowei Li and Yinhe Han},
url = {https://ieeexplore.ieee.org/abstract/document/10342823},
doi = {10.1109/TCAD.2023.3339438},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sharifi, Zaniar; Soltanian, Khabat; Amiri, Ali
Developing Convolutional Neural Networks using a Novel Lamarckian Co-Evolutionary Algorithm Proceedings Article
In: 2023 13th International Conference on Computer and Knowledge Engineering (ICCKE), pp. 399-408, 2023.
@inproceedings{10326238,
title = {Developing Convolutional Neural Networks using a Novel Lamarckian Co-Evolutionary Algorithm},
author = {Zaniar Sharifi and Khabat Soltanian and Ali Amiri},
url = {https://ieeexplore.ieee.org/abstract/document/10326238},
doi = {10.1109/ICCKE60553.2023.10326238},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 13th International Conference on Computer and Knowledge Engineering (ICCKE)},
pages = {399--408},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Xiangyi; Guo, Jiajia; Wen, Chao Kai; Tian, Wenqiang; Jin, Shi
Automatic Neural Network Design of Scene-customization for Massive MIMO CSI Feedback Proceedings Article
In: 2023 IEEE 98th Vehicular Technology Conference (VTC2023-Fall), pp. 1-5, 2023.
@inproceedings{10333588,
title = {Automatic Neural Network Design of Scene-customization for Massive {MIMO} {CSI} Feedback},
author = {Xiangyi Li and Jiajia Guo and Chao-Kai Wen and Wenqiang Tian and Shi Jin},
doi = {10.1109/VTC2023-Fall60731.2023.10333588},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 IEEE 98th Vehicular Technology Conference (VTC2023-Fall)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Xuan; Liu, Xiong; Huang, Zhifeng; Zhao, Tianyang
A Compact Neural Network-Based Conversion Loss Model with Hard Constraints for Energy Management Journal Article
In: IEEE Transactions on Industry Applications, pp. 1-13, 2023.
@article{10323525,
title = {A Compact Neural Network-Based Conversion Loss Model with Hard Constraints for Energy Management},
author = {Xuan Liu and Xiong Liu and Zhifeng Huang and Tianyang Zhao},
doi = {10.1109/TIA.2023.3334698},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Industry Applications},
pages = {1--13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
}
Aliouat, Wahiba; Badis, Lamis; Bouchiba, Kenza
Metaheuristic-based automated design of Convolutional Neural Network architecture for plant seedlings classification Proceedings Article
In: 2023 5th International Conference on Pattern Analysis and Intelligent Systems (PAIS), pp. 1-8, 2023.
@inproceedings{10321990,
title = {Metaheuristic-based automated design of Convolutional Neural Network architecture for plant seedlings classification},
author = {Wahiba Aliouat and Lamis Badis and Kenza Bouchiba},
url = {https://ieeexplore.ieee.org/abstract/document/10321990},
doi = {10.1109/PAIS60821.2023.10321990},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 5th International Conference on Pattern Analysis and Intelligent Systems (PAIS)},
pages = {1--8},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dong, Haoyuan; Gao, Yang; Wang, Haishuai; Yang, Hong; Zhang, Peng
Heterogeneous Graph Neural Architecture Search with GPT-4 Technical Report
2023.
@techreport{dong2023heterogeneous,
title = {Heterogeneous Graph Neural Architecture Search with {GPT-4}},
author = {Haoyuan Dong and Yang Gao and Haishuai Wang and Hong Yang and Peng Zhang},
url = {https://arxiv.org/abs/2312.08680},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, J.; Lv, Y.; Zhang, P.; Zhao, J.
Neural Architecture Search for Unsupervised PET Image Denoising Proceedings Article
In: 2023 IEEE Nuclear Science Symposium, Medical Imaging Conference and International Symposium on Room-Temperature Semiconductor Detectors (NSS MIC RTSD), pp. 1-1, 2023.
@inproceedings{10338097,
title = {Neural Architecture Search for Unsupervised {PET} Image Denoising},
author = {J. Li and Y. Lv and P. Zhang and J. Zhao},
doi = {10.1109/NSSMICRTSD49126.2023.10338097},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 IEEE Nuclear Science Symposium, Medical Imaging Conference and International Symposium on Room-Temperature Semiconductor Detectors (NSS MIC RTSD)},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Mu, Pan; Wu, Guanyao; Liu, Jinyuan; Zhang, Yuduo; Fan, Xin; Liu, Risheng
Learning to Search a Lightweight Generalized Network for Medical Image Fusion Journal Article
In: IEEE Transactions on Circuits and Systems for Video Technology, pp. 1-1, 2023.
@article{10360160,
title = {Learning to Search a Lightweight Generalized Network for Medical Image Fusion},
author = {Pan Mu and Guanyao Wu and Jinyuan Liu and Yuduo Zhang and Xin Fan and Risheng Liu},
url = {https://ieeexplore.ieee.org/abstract/document/10360160},
doi = {10.1109/TCSVT.2023.3342808},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Circuits and Systems for Video Technology},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wei, Zimian; Li, Lujun; Dong, Peijie; Hui, Zheng; Li, Anggeng; Lu, Menglong; Pan, Hengyue; Tian, Zhiliang; Li, Dongsheng
Auto-Prox: Training-Free Vision Transformer Architecture Search via Automatic Proxy Discovery Collection
2023.
@collection{wei2023autoprox,
title = {{Auto-Prox}: Training-Free Vision Transformer Architecture Search via Automatic Proxy Discovery},
author = {Zimian Wei and Lujun Li and Peijie Dong and Zheng Hui and Anggeng Li and Menglong Lu and Hengyue Pan and Zhiliang Tian and Dongsheng Li},
url = {https://arxiv.org/abs/2312.09059},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {AAAI 2024},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Sukthanker, Rhea Sanjay; Krishnakumar, Arjun; Safari, Mahmoud; Hutter, Frank
Weight-Entanglement Meets Gradient-Based Neural Architecture Search Technical Report
2023.
@techreport{sukthanker2023weightentanglement,
  author    = {Rhea Sanjay Sukthanker and Arjun Krishnakumar and Mahmoud Safari and Frank Hutter},
  title     = {Weight-Entanglement Meets Gradient-Based Neural Architecture Search},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://arxiv.org/abs/2312.10440},
  urldate   = {2023-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Chen, Tianyi; Ding, Tianyu; Zhu, Zhihui; Chen, Zeyu; Wu, HsiangTao; Zharkov, Ilya; Liang, Luming
OTOv3: Automatic Architecture-Agnostic Neural Network Training and Compression from Structured Pruning to Erasing Operators Technical Report
2023.
@techreport{chen2023otov3,
title = {{OTOv3}: Automatic Architecture-Agnostic Neural Network Training and Compression from Structured Pruning to Erasing Operators},
author = {Tianyi Chen and Tianyu Ding and Zhihui Zhu and Zeyu Chen and HsiangTao Wu and Ilya Zharkov and Luming Liang},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Castillo, Angela; Kohler, Jonas; Pérez, Juan C.; Pérez, Juan Pablo; Pumarola, Albert; Ghanem, Bernard; Arbeláez, Pablo; Thabet, Ali
Adaptive Guidance: Training-free Acceleration of Conditional Diffusion Models Technical Report
2023.
@techreport{castillo2023adaptive,
  author    = {Angela Castillo and Jonas Kohler and Juan C. Pérez and Juan Pablo Pérez and Albert Pumarola and Bernard Ghanem and Pablo Arbeláez and Ali Thabet},
  title     = {Adaptive Guidance: Training-free Acceleration of Conditional Diffusion Models},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://arxiv.org/abs/2312.12487},
  urldate   = {2023-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Chopra, Jatin; Kumar, Gaddam Prathik; Ghosh, Archismita; Prasad, Paarth; Vats, Shreya
A Novel Approach to Human Activity Recognition: Integrating Automated Machine Learning and Multimodal Fusion Mechanisms Journal Article
In: International Journal for Research Trends and Innovation, vol. 8, pp. 6, 2023.
@article{articlee,
  author    = {Jatin Chopra and Gaddam Prathik Kumar and Archismita Ghosh and Paarth Prasad and Shreya Vats},
  title     = {A Novel Approach to Human Activity Recognition: Integrating Automated Machine Learning and Multimodal Fusion Mechanisms},
  journal   = {International Journal for Research Trends and Innovation},
  volume    = {8},
  pages     = {6},
  year      = {2023},
  date      = {2023-01-01},
  url       = {https://www.researchgate.net/profile/Jatin-Chopra/publication/376601691_A_Novel_Approach_to_Human_Activity_Recognition_Integrating_Automated_Machine_Learning_and_Multimodal_Fusion_Mechanisms/links/658016359d7bc03b3084fe90/A-Novel-Approach-to-Human-Activity-Recognition-Integrating-Automated-Machine-Learning-and-Multimodal-Fusion-Mechanisms.pdf},
  urldate   = {2023-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Zhang, Mengxuan; Liu, Long; Jin, Yaochu; Lei, Zhikun; Wang, Zhigang; Jiao, Licheng
Tree-shaped multiobjective evolutionary CNN for hyperspectral image classification Journal Article
In: Applied Soft Computing, pp. 111176, 2023, ISSN: 1568-4946.
@article{ZHANG2023111176,
title = {Tree-shaped multiobjective evolutionary {CNN} for hyperspectral image classification},
author = {Mengxuan Zhang and Long Liu and Yaochu Jin and Zhikun Lei and Zhigang Wang and Licheng Jiao},
url = {https://www.sciencedirect.com/science/article/pii/S1568494623011948},
doi = {10.1016/j.asoc.2023.111176},
issn = {1568-4946},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Applied Soft Computing},
pages = {111176},
abstract = {Convolutional neural networks (CNNs) have achieved significant performances in hyperspectral image (HSI) classification in recent years. However, designing a high-performance CNN depends on human expertise heavily, which usually takes considerable time and labor. With regard to reducing the burden of designing the networks, neural architecture search (NAS) has attracted increasing attention. A typical NAS approach aims to optimize the network architectures in a predefined search space with a suitable search algorithm automatically. However, the existing NAS work does not fully consider the spatial resolution and the spectral noise interference of HSIs. Furthermore, most NAS approaches use sequential blocks or cells to construct the networks, which are unsuitable for extracting multiscale features of HSIs and result in degraded performance. Considering the above challenges, we propose a tree-shaped multiobjective evolutionary CNN (TMOE-CNN) for HSI classification. An expanded search space is designed, which includes the image patch size and the channel number of the input image patches. A multibranch supernetwork structure is proposed, which resembles a tree as the fundamental architecture for the network block. The image patch size and the denoising strength of the input image patches can be established adaptively throughout the evolutionary search process. The tree-shaped networks can fuse multiscale features to enhance the capacity of the network for feature extraction. Additionally, we consider both the classification accuracy and the floating-point computational complexity in the environmental selection. It is helpful to find the networks with simple structure and low complexity while ensuring classification accuracy. Experiments on different HSI datasets show that TMOE-CNN can search CNNs with high accuracies and simple structures automatically.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
He, Hongyi; Liu, Longjun; Zhang, Haonan; Zheng, Nanning
IS-DARTS: Stabilizing DARTS through Precise Measurement on Candidate Importance Technical Report
2023.
@techreport{he2023isdarts,
title = {{IS-DARTS}: Stabilizing {DARTS} through Precise Measurement on Candidate Importance},
author = {Hongyi He and Longjun Liu and Haonan Zhang and Nanning Zheng},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Sridhar, Sharath Nittur; Szankin, Maciej; Chen, Fang; Sundaresan, Sairam; Sarah, Anthony
SimQ-NAS: Simultaneous Quantization Policy and Neural Architecture Search Technical Report
2023.
@techreport{sridhar2023simqnas,
title = {{SimQ-NAS}: Simultaneous Quantization Policy and Neural Architecture Search},
author = {Sharath Nittur Sridhar and Maciej Szankin and Fang Chen and Sairam Sundaresan and Anthony Sarah},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Zitong; Liu, Zhicheng; Zhao, Yunfeng; Qiu, Chao; Zhang, Cheng; Wang, Xiaofei
ENASFL: A Federated Neural Architecture Search Scheme for Heterogeneous Deep Models in Distributed Edge Computing Systems Journal Article
In: IEEE Transactions on Network Science and Engineering, pp. 1-11, 2023.
@article{10366825,
title = {{ENASFL}: A Federated Neural Architecture Search Scheme for Heterogeneous Deep Models in Distributed Edge Computing Systems},
author = {Zitong Zhang and Zhicheng Liu and Yunfeng Zhao and Chao Qiu and Cheng Zhang and Xiaofei Wang},
doi = {10.1109/TNSE.2023.3344850},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Network Science and Engineering},
pages = {1--11},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Tianyi; Liang, Luming; Ding, Tianyu; Zharkov, Ilya
OTOv3: Towards Automatic Sub-Network Search Within General Super Deep Neural Networks Miscellaneous
2023.
@misc{chen2023otov,
title = {{OTOv3}: Towards Automatic Sub-Network Search Within General Super Deep Neural Networks},
author = {Tianyi Chen and Luming Liang and Tianyu Ding and Ilya Zharkov},
url = {https://openreview.net/forum?id=9RUblEXVVD},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Lanjewar, Madhusudan G; Panchbhai, Kamini G
Enhancing Fish Freshness Prediction using NasNet-LSTM Journal Article
In: Journal of Food Composition and Analysis, pp. 105945, 2023, ISSN: 0889-1575.
@article{LANJEWAR2023105945,
title = {Enhancing Fish Freshness Prediction using {NasNet-LSTM}},
author = {Madhusudan G Lanjewar and Kamini G Panchbhai},
url = {https://www.sciencedirect.com/science/article/pii/S0889157523008190},
doi = {10.1016/j.jfca.2023.105945},
issn = {0889-1575},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Journal of Food Composition and Analysis},
pages = {105945},
abstract = {Fish freshness is an essential feature of the fishing industry because it affects the safety and quality of the product. Precisely estimating seafood freshness is critical for consumer pleasure and waste reduction. This study presented a fish freshness prediction framework using two datasets from Kaggle, which were combined but highly imbalanced. Both upscaling (SMOTEENN) and downscaling (Random Under Sampler) methods were used to address the dataset imbalance. Neural Architecture Search Network (NasNet) and Long Short-Term Memory Networks (LSTM) models were employed to extract features from images. A feature selection technique was also applied to identify the most relevant features from the extracted features. The proposed NasNet-LSTM approach achieved impressive Matthew's correlation coefficient (MCC) and Cohen's kappa coefficient (KC) scores of 99.1%. The models were also cross-validated using a 5-fold method, resulting in MCC and KC values of 97%. Moreover, the p-value and confidence intervals of the proposed method were analyzed.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xie, Tao; Dai, Kun; Jiang, Zhiqiang; Li, Ruifeng; Mao, Shouren; Wang, Ke; Zhao, Lijun
ViT-MVT: A Unified Vision Transformer Network for Multiple Vision Tasks Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, pp. 1-15, 2023.
@article{10368184,
title = {{ViT-MVT}: A Unified Vision Transformer Network for Multiple Vision Tasks},
author = {Tao Xie and Kun Dai and Zhiqiang Jiang and Ruifeng Li and Shouren Mao and Ke Wang and Lijun Zhao},
url = {https://ieeexplore.ieee.org/abstract/document/10368184},
doi = {10.1109/TNNLS.2023.3342141},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
pages = {1--15},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tu, Chongjun; Ye, Peng; Lin, Weihao; Ye, Hancheng; Yu, Chong; Chen, Tao; Li, Baopu; Ouyang, Wanli
Efficient Architecture Search via Bi-level Data Pruning Technical Report
2023.
@techreport{tu2023efficient,
  author    = {Chongjun Tu and Peng Ye and Weihao Lin and Hancheng Ye and Chong Yu and Tao Chen and Baopu Li and Wanli Ouyang},
  title     = {Efficient Architecture Search via Bi-level Data Pruning},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}