Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI, and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2024
Pinos, Michal; Sekanina, Lukas; Mrazek, Vojtech
ApproxDARTS: Differentiable Neural Architecture Search with Approximate Multipliers Technical Report
2024.
@techreport{pinos2024approxdarts,
  title = {{ApproxDARTS}: Differentiable Neural Architecture Search with Approximate Multipliers},
  author = {Michal Pinos and Lukas Sekanina and Vojtech Mrazek},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.08002},
  eprint = {2404.08002},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Li, Lianqiang; Yan, Chenqian; Chen, Yefei
Differentiable Search for Finding Optimal Quantization Strategy Technical Report
2024.
@techreport{li2024differentiable,
  title = {Differentiable Search for Finding Optimal Quantization Strategy},
  author = {Lianqiang Li and Chenqian Yan and Yefei Chen},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.08010},
  eprint = {2404.08010},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Lyu, Bo; Yang, Yin; Cao, Yuting; Shi, Tuo; Chen, Yiran; Huang, Tingwen; Wen, Shiping
A memristive all-inclusive hypernetwork for parallel analog deployment of full search space architectures Journal Article
In: Neural Networks, vol. 175, pp. 106312, 2024, ISSN: 0893-6080.
@article{LYU2024106312,
  title = {A memristive all-inclusive hypernetwork for parallel analog deployment of full search space architectures},
  author = {Bo Lyu and Yin Yang and Yuting Cao and Tuo Shi and Yiran Chen and Tingwen Huang and Shiping Wen},
  url = {https://www.sciencedirect.com/science/article/pii/S0893608024002363},
  doi = {10.1016/j.neunet.2024.106312},
  issn = {0893-6080},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Neural Networks},
  volume = {175},
  pages = {106312},
  abstract = {In recent years, there has been a significant advancement in memristor-based neural networks, positioning them as a pivotal processing-in-memory deployment architecture for a wide array of deep learning applications. Within this realm of progress, the emerging parallel analog memristive platforms are prominent for their ability to generate multiple feature maps in a single processing cycle. However, a notable limitation is that they are specifically tailored for neural networks with fixed structures. As an orthogonal direction, recent research reveals that neural architecture should be specialized for tasks and deployment platforms. Building upon this, the neural architecture search (NAS) methods effectively explore promising architectures in a large design space. However, these NAS-based architectures are generally heterogeneous and diversified, making it challenging for deployment on current single-prototype, customized, parallel analog memristive hardware circuits. Therefore, investigating memristive analog deployment that overrides the full search space is a promising and challenging problem. Inspired by this, and beginning with the DARTS search space, we study the memristive hardware design of primitive operations and propose the memristive all-inclusive hypernetwork that covers $2\times 10^{25}$ network architectures. Our computational simulation results on 3 representative architectures (DARTS-V1, DARTS-V2, PDARTS) show that our memristive all-inclusive hypernetwork achieves promising results on the CIFAR10 dataset (89.2\% of PDARTS with 8-bit quantization precision), and is compatible with all architectures in the DARTS full-space. The hardware performance simulation indicates that the memristive all-inclusive hypernetwork costs slightly more resource consumption (nearly the same in power, 22\%--25\% increase in Latency, $1.5\times$ in Area) relative to the individual deployment, which is reasonable and may reach a tolerable trade-off deployment scheme for industrial scenarios.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Ahmad, Afzal; Du, Linfeng; Xie, Zhiyao; Zhang, Wei
Accel-NASBench: Sustainable Benchmarking for Accelerator-Aware NAS Technical Report
2024.
@techreport{ahmad2024accelnasbench,
  title = {{Accel-NASBench}: Sustainable Benchmarking for Accelerator-Aware {NAS}},
  author = {Afzal Ahmad and Linfeng Du and Zhiyao Xie and Wei Zhang},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.08005},
  eprint = {2404.08005},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Wang, Shaowei; Zhang, Lingling; Qin, Tao; Liu, Jun; Li, Yifei; Wang, Qianying; Zheng, Qinghua
Multi-view cognition with path search for one-shot part labeling Journal Article
In: Computer Vision and Image Understanding, vol. 244, pp. 104015, 2024, ISSN: 1077-3142.
@article{WANG2024104015,
  title = {Multi-view cognition with path search for one-shot part labeling},
  author = {Shaowei Wang and Lingling Zhang and Tao Qin and Jun Liu and Yifei Li and Qianying Wang and Qinghua Zheng},
  url = {https://www.sciencedirect.com/science/article/pii/S1077314224000961},
  doi = {10.1016/j.cviu.2024.104015},
  issn = {1077-3142},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Computer Vision and Image Understanding},
  volume = {244},
  pages = {104015},
  abstract = {The diagram is an abstract form of visual expression in the field of education, which is often used to express complex phenomena and convey logic relationships. In recent years, tasks such as diagram classification and textbook question answering have attracted attention and become a new benchmark for evaluating the complex reasoning ability of models. However, due to the lack of large corpora and the abstract and sparse visual expressions, it is difficult for research methods on natural images to achieve good results on diagrams. In order to solve the above challenges, the researchers consider using the one-shot setting for limited samples challenge and using part labeling to enhance the learning of relational structures. By definition, the one-shot part labeling task is to label multiple parts of an object in the query diagram given only a single support diagram of that category. Under this setting, we propose the Automated Search Multi-view Matching Network (Auto-MMN) which simulating human cognitive methods and process of set-to-set matching problem. We define three views operations based on the attention mechanism and multiplex graph, including the learning of global visual features (global–local view), the interaction between neighboring parts (local–local view), and the comparison of counterparts (cross-local view). We propose a novel learning path search technology to adaptively plan paths for the above three views, which can also increase the generalization performance of the model. We evaluate the Auto-MMN on three different datasets, that is, image-to-image, diagram-to-diagram, and image-to-diagram part labeling scenarios. Extensive experiments show that our model significantly outperforms other baselines on different scenarios and both the multi-view operations and the learning path search produce excellent results. We open source the core code in https://github.com/WayneWong97/Auto-MMN.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Ye, Yin; Chen, Yaxiong; Xiong, Shengwu
Field detection of pests based on adaptive feature fusion and evolutionary neural architecture search Journal Article
In: Computers and Electronics in Agriculture, vol. 221, pp. 108936, 2024, ISSN: 0168-1699.
@article{YE2024108936,
  title = {Field detection of pests based on adaptive feature fusion and evolutionary neural architecture search},
  author = {Yin Ye and Yaxiong Chen and Shengwu Xiong},
  url = {https://www.sciencedirect.com/science/article/pii/S0168169924003272},
  doi = {10.1016/j.compag.2024.108936},
  issn = {0168-1699},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Computers and Electronics in Agriculture},
  volume = {221},
  pages = {108936},
  abstract = {Accurate detection of pests is vital in smart agriculture as it is among the main factors that profoundly influence the yield and quality of crops. In the actual field, pests frequently manifest as small objects, thereby presenting a considerable obstacle to effectively detect pests in the field. For the problem of ineffective utilization of plant context information and inadequate design of neural architecture in field pest detection, we propose the pest detection model (PestNAS) based on adaptive feature fusion and evolutionary neural architecture search. It consists of the adaptive feature fusion module: plant context information is extracted, and the adaptive fusion of pest-related features and plant auxiliary features is designed to effectively utilize plant information; the evolutionary search space module: the novel search space that includes resolution and receptive field enhancement operations is designed with evolution to improve pest representation; the GA-Adam search algorithm: the Adam with genetic algorithm is designed to optimize the objective function of neural architecture search and obtain the relatively better neural architecture for pest detection. The ablation experiments verify the effectiveness of each module in the PestNAS. The comparison experiments reveal that the PestNAS can achieve higher detection accuracy than the other ten neural architecture search models on eleven field pests.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Song, Changwei; Ma, Yongjie; Xu, Yang; Chen, Hong
Multi-population evolutionary neural architecture search with stacked generalization Journal Article
In: Neurocomputing, vol. 587, pp. 127664, 2024, ISSN: 0925-2312.
@article{SONG2024127664,
  title = {Multi-population evolutionary neural architecture search with stacked generalization},
  author = {Changwei Song and Yongjie Ma and Yang Xu and Hong Chen},
  url = {https://www.sciencedirect.com/science/article/pii/S0925231224004351},
  doi = {10.1016/j.neucom.2024.127664},
  issn = {0925-2312},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Neurocomputing},
  volume = {587},
  pages = {127664},
  abstract = {In recent years, neural architecture search (NAS) algorithms based on Evolutionary Computation (EC) have demonstrated immense potential in the automated design of deep neural network architectures, garnering widespread attention in the field of deep learning. Most EC-based NAS algorithms select the best individual based on overall fitness score. However, some eliminated suboptimal individuals may only perform weakly in overall classification performance, but perform well on certain classes. To search valuable suboptimal individuals and prevent them from being eliminated, we propose a multi-population evolutionary NAS algorithm with stacked generalization (MPE-NAS). Each population evolves based on the classification accuracy of different classes. After completing the evolution process, the stacked generalization approach is utilized to fuse the searched architectures. Moreover, an integrated performance predictor based on k-nearest neighbor (KNN) regression, random forest (RF) and support vector machine (SVM) is proposed to alleviate computational cost during architecture performance evaluation. On the CIFAR benchmark dataset, the proposed algorithm is examined and compared with the most advanced algorithms, and its effectiveness is confirmed based on experiments. In addition, the proposed multi-population evolutionary (MPE) search strategy is applied to others EC-based NAS algorithms, and achieves the performance improvement without increasing computational resources.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Balaji, Adarsha; Hadidi, Ramyad; Kollmer, Gregory; Fouda, Mohammed E.; Balaprakash, Prasanna
Network architecture search of X-ray based scientific applications Technical Report
2024.
@techreport{balaji2024network,
  title = {Network architecture search of {X-ray} based scientific applications},
  author = {Adarsha Balaji and Ramyad Hadidi and Gregory Kollmer and Mohammed E. Fouda and Prasanna Balaprakash},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.10689},
  eprint = {2404.10689},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Berezsky, Oleh; Liashchynskyi, Petro; Pitsun, Oleh; Izonin, Ivan
Synthesis of Convolutional Neural Network architectures for biomedical image classification Journal Article
In: Biomedical Signal Processing and Control, vol. 95, pp. 106325, 2024, ISSN: 1746-8094.
@article{BEREZSKY2024106325,
  title = {Synthesis of {Convolutional Neural Network} architectures for biomedical image classification},
  author = {Oleh Berezsky and Petro Liashchynskyi and Oleh Pitsun and Ivan Izonin},
  url = {https://www.sciencedirect.com/science/article/pii/S1746809424003835},
  doi = {10.1016/j.bspc.2024.106325},
  issn = {1746-8094},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Biomedical Signal Processing and Control},
  volume = {95},
  pages = {106325},
  abstract = {Convolutional Neural Networks (CNNs) are frequently used for image classification. This is crucial for the biomedical image classification used for automatic diagnosis in oncology. Designing optimal convolutional neural network architectures is a routine procedure that requires expert knowledge of computer vision and biomedical image features. To address this issue, we developed an automatic method for finding optimal CNN architectures. Our two-step method includes a genetic algorithm-based micro- and macro-search. Micro-search aims to find the optimal cell architecture based on the number of nodes and a set of predefined operations between nodes. Macro-search identifies the optimal number of cells and the operations between them to obtain the final optimal architecture. We obtained several optimal CNN architectures using the developed method of automatic architecture search. We conducted several computer experiments using cytological image classification as an example. The studies' findings demonstrated that cytological image classification accuracy is higher compared to the classification accuracy of known CNN architectures (VGG-16, AlexNet, LeNet-5, ResNet-18, ResNet-50, MobileNetV3). The method is efficient because the search time for optimal architectures is short. Additionally, the method of optimal architecture search can be used for the synthesis of architectures used for the classification of other classes of biomedical images.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Jawahar, Ganesh
Methods for design of efficient on-device natural language processing architectures PhD Thesis
University of British Columbia, 2024.
@phdthesis{Jawahar_2024,
  title = {Methods for design of efficient on-device natural language processing architectures},
  author = {Ganesh Jawahar},
  url = {https://open.library.ubc.ca/collections/ubctheses/24/items/1.0441384},
  doi = {10.14288/1.0441384},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  school = {University of British Columbia},
  series = {Electronic Theses and Dissertations (ETDs) 2008+},
  keywords = {},
  pubstate = {published},
  tppubtype = {phdthesis}
}
Qin, Danfeng; Leichner, Chas; Delakis, Manolis; Fornoni, Marco; Luo, Shixin; Yang, Fan; Wang, Weijun; Banbury, Colby; Ye, Chengxi; Akin, Berkin; Aggarwal, Vaibhav; Zhu, Tenghui; Moro, Daniele; Howard, Andrew
MobileNetV4 – Universal Models for the Mobile Ecosystem Technical Report
2024.
@techreport{qin2024mobilenetv4,
  title = {{MobileNetV4} -- Universal Models for the Mobile Ecosystem},
  author = {Danfeng Qin and Chas Leichner and Manolis Delakis and Marco Fornoni and Shixin Luo and Fan Yang and Weijun Wang and Colby Banbury and Chengxi Ye and Berkin Akin and Vaibhav Aggarwal and Tenghui Zhu and Daniele Moro and Andrew Howard},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.10518},
  eprint = {2404.10518},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Xiang, Xunzhi; Jing, Kun; Xu, Jungang
A Neural Architecture Predictor based on GNN-Enhanced Transformer Proceedings Article
In: Dasgupta, Sanjoy; Mandt, Stephan; Li, Yingzhen (Ed.): International Conference on Artificial Intelligence and Statistics, 2-4 May 2024, Palau de Congressos, Valencia, Spain, pp. 1729–1737, PMLR, 2024.
@inproceedings{DBLP:conf/aistats/XiangJX24,
  title = {A Neural Architecture Predictor based on {GNN}-Enhanced Transformer},
  author = {Xunzhi Xiang and Kun Jing and Jungang Xu},
  editor = {Sanjoy Dasgupta and Stephan Mandt and Yingzhen Li},
  url = {https://proceedings.mlr.press/v238/xiang24a.html},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  booktitle = {International Conference on Artificial Intelligence and Statistics, 2-4 May 2024, Palau de Congressos, Valencia, Spain},
  volume = {238},
  pages = {1729--1737},
  publisher = {PMLR},
  series = {Proceedings of Machine Learning Research},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Liu, Yue; Yu, Ziyi; Liu, Zitu; Tian, Wenjie
Efficient Neural Architecture Design via Capturing Architecture-Performance Joint Distribution Proceedings Article
In: Dasgupta, Sanjoy; Mandt, Stephan; Li, Yingzhen (Ed.): International Conference on Artificial Intelligence and Statistics, 2-4 May 2024, Palau de Congressos, Valencia, Spain, pp. 1738–1746, PMLR, 2024.
@inproceedings{DBLP:conf/aistats/LiuYLT24,
  title = {Efficient Neural Architecture Design via Capturing Architecture-Performance Joint Distribution},
  author = {Yue Liu and Ziyi Yu and Zitu Liu and Wenjie Tian},
  editor = {Sanjoy Dasgupta and Stephan Mandt and Yingzhen Li},
  url = {https://proceedings.mlr.press/v238/liu24b.html},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  booktitle = {International Conference on Artificial Intelligence and Statistics, 2-4 May 2024, Palau de Congressos, Valencia, Spain},
  volume = {238},
  pages = {1738--1746},
  publisher = {PMLR},
  series = {Proceedings of Machine Learning Research},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Muñoz, J. Pablo; Yuan, Jinjie; Jain, Nilesh
Shears: Unstructured Sparsity with Neural Low-rank Adapter Search Technical Report
2024.
@techreport{muñoz2024shears,
title = {Shears: Unstructured Sparsity with Neural Low-rank Adapter Search},
author = {J. Pablo Muñoz and Jinjie Yuan and Nilesh Jain},
url = {https://arxiv.org/abs/2404.10934},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
internal-note = {NOTE(review): citation key contains non-ASCII 'ñ', which classic BibTeX and some toolchains reject; consider renaming to 'munoz2024shears' -- but confirm no document cites the current key first, since the key is the entry's public interface},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lou, Wenqi; Gong, Lei; Wang, Chao; Qian, Jiaming; Wang, Xuan; Li, Changlong; Zhou, Xuehai
Unleashing Network/Accelerator Co-Exploration Potential on FPGAs: A Deeper Joint Search Journal Article
In: IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, pp. 1-1, 2024.
@article{10505904,
  title = {Unleashing Network/Accelerator Co-Exploration Potential on {FPGAs}: A Deeper Joint Search},
  author = {Wenqi Lou and Lei Gong and Chao Wang and Jiaming Qian and Xuan Wang and Changlong Li and Xuehai Zhou},
  doi = {10.1109/TCAD.2024.3391688},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
  pages = {1-1},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Neumeyer, Simon; Stier, Julian; Granitzer, Michael
Efficient NAS with FaDE on Hierarchical Spaces Proceedings Article
In: Miliou, Ioanna; Piatkowski, Nico; Papapetrou, Panagiotis (Ed.): Advances in Intelligent Data Analysis XXII, pp. 158–170, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-58553-1.
@inproceedings{10.1007/978-3-031-58553-1_13,
  title = {Efficient {NAS} with {FaDE} on Hierarchical Spaces},
  author = {Simon Neumeyer and Julian Stier and Michael Granitzer},
  editor = {Ioanna Miliou and Nico Piatkowski and Panagiotis Papapetrou},
  url = {https://link.springer.com/chapter/10.1007/978-3-031-58553-1_13},
  doi = {10.1007/978-3-031-58553-1_13},
  isbn = {978-3-031-58553-1},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  booktitle = {Advances in Intelligent Data Analysis XXII},
  pages = {158--170},
  publisher = {Springer Nature Switzerland},
  address = {Cham},
  abstract = {Neural architecture search (NAS) is a challenging problem. Hierarchical search spaces allow for cheap evaluations of neural network sub modules to serve as surrogate for architecture evaluations. Yet, sometimes the hierarchy is too restrictive or the surrogate fails to generalize. We present FaDE which uses differentiable architecture search to obtain relative performance predictions on finite regions of a hierarchical NAS space. The relative nature of these ranks calls for a memory-less, batch-wise outer search algorithm for which we use an evolutionary algorithm with pseudo-gradient descent. FaDE is especially suited on deep hierarchical, respectively multi-cell search spaces, which it can explore by linear instead of exponential cost and therefore eliminates the need for a proxy search space.},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Phan, Quan Minh; Luong, Ngoc Hoang
Parameter-less Pareto local search for multi-objective neural architecture search with the Interleaved Multi-start Scheme Journal Article
In: Swarm and Evolutionary Computation, vol. 87, pp. 101573, 2024, ISSN: 2210-6502.
@article{PHAN2024101573,
  title = {Parameter-less {Pareto} local search for multi-objective neural architecture search with the Interleaved Multi-start Scheme},
  author = {Quan Minh Phan and Ngoc Hoang Luong},
  url = {https://www.sciencedirect.com/science/article/pii/S2210650224001111},
  doi = {10.1016/j.swevo.2024.101573},
  issn = {2210-6502},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Swarm and Evolutionary Computation},
  volume = {87},
  pages = {101573},
  abstract = {With the emerging deployment of deep neural networks, such as in mobile devices and autonomous cars, there is a growing demand for neural architecture search (NAS) to automatically design powerful network architectures. It is more reasonable to formulate NAS as a multi-objective optimization problem. In addition to prediction performance, multi-objective NAS (MONAS) problems take into account other criteria like the number of parameters and inference latency. Multi-objective evolutionary algorithms (MOEAs) are the preferred approach for tackling MONAS due to their effectiveness in dealing with multi-objective optimization problems. Recently, local search-based NAS algorithms have demonstrated their efficiency over MOEAs for MONAS problems. However, their performance has been only verified on bi-objective NAS problems. In this article, we propose a local search algorithm for multi-objective NAS (LOMONAS), an efficient local search framework for solving not only bi-objective NAS problems but also NAS problems having more than two objectives. We additionally present a parameter-less version of LOMONAS, namely IMS-LOMONAS, by combining LOMONAS with the Interleaved Multi-start Scheme (IMS) to help NAS practitioners avoid manual control parameter settings. Experimental results from a series of benchmark problems in the CEC'23 Competition demonstrate the competitiveness of LOMONAS and IMS-LOMONAS compared to MOEAs in tackling MONAS within both small-scale and large-scale search spaces. Source code is available at: https://github.com/ELO-Lab/IMS-LOMONAS.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Sinha, Nilotpal; Rostami, Peyman; Shabayek, Abd El Rahman; Kacem, Anis; Aouada, Djamila
Multi-Objective Hardware Aware Neural Architecture Search using Hardware Cost Diversity Technical Report
2024.
@techreport{sinha2024multiobjective,
  title = {Multi-Objective Hardware Aware Neural Architecture Search using Hardware Cost Diversity},
  author = {Nilotpal Sinha and Peyman Rostami and Abd El Rahman Shabayek and Anis Kacem and Djamila Aouada},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.12403},
  eprint = {2404.12403},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Terin, Rodrigo Carmo; Arenas, Zochil González; Santana, Roberto
Identifying phase transitions in physical systems with neural networks: a neural architecture search perspective Technical Report
2024.
@techreport{terin2024identifying,
  title = {Identifying phase transitions in physical systems with neural networks: a neural architecture search perspective},
  author = {Rodrigo Carmo Terin and Zochil González Arenas and Roberto Santana},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.15118},
  eprint = {2404.15118},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Wang, Yanghu; Zhou, Zheng; Yang, Laihao; Gao, Robert X.; Yan, Ruqiang
Wavelet-driven differentiable architecture search for planetary gear fault diagnosis Journal Article
In: Journal of Manufacturing Systems, vol. 74, pp. 587-593, 2024, ISSN: 0278-6125.
@article{WANG2024587,
  title = {Wavelet-driven differentiable architecture search for planetary gear fault diagnosis},
  author = {Yanghu Wang and Zheng Zhou and Laihao Yang and Robert X. Gao and Ruqiang Yan},
  url = {https://www.sciencedirect.com/science/article/pii/S0278612524000839},
  doi = {10.1016/j.jmsy.2024.04.017},
  issn = {0278-6125},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  journal = {Journal of Manufacturing Systems},
  volume = {74},
  pages = {587--593},
  abstract = {With the advancement of artificial intelligence and the accumulation of industrial big data, intelligent diagnosis methods based on deep learning have become the mainstream for diagnosing mechanical faults in manufacturing systems. Despite this, developing high-performance neural network models for specific tasks necessitates a substantial amount of expert knowledge. This consumes a considerable amount of time in the trial-and-error process, thereby constraining the progress in neural network development. Neural architecture search (NAS) offers a solution to address this problem. However, the feature extraction operators in the existing NAS search space are primarily imported from other fields, leading to domain bias and a lack of interpretability. To address these challenges, we present a NAS method driven by wavelets. To be specific, the wavelet operators that can extract fault related features from vibration signals is added to the search space, and the gradient optimization strategy is utilized to search the optimal architecture from the hypernet. The effectiveness of the proposed method is validated through a dataset specific to planetary gears. Upon comparison with other models, it is evident that the proposed method exhibits superior performance across all added noise levels.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Broni-Bediako, Clifford; Xia, Junshi; Yokoya, Naoto
Unsupervised Domain Adaptation Architecture Search with Self-Training for Land Cover Mapping Technical Report
2024.
@techreport{bronibediako2024unsupervised,
title = {Unsupervised Domain Adaptation Architecture Search with Self-Training for Land Cover Mapping},
author = {Clifford Broni-Bediako and Junshi Xia and Naoto Yokoya},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
internal-note = {NOTE(review): unlike sibling entries, this one carries no url, doi, or eprint identifier; locate the preprint and add one -- value cannot be inferred from this file},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lu, Aojun; Feng, Tao; Yuan, Hangjie; Song, Xiaotian; Sun, Yanan
Revisiting Neural Networks for Continual Learning: An Architectural Perspective Technical Report
2024.
@techreport{lu2024revisiting,
  title = {Revisiting Neural Networks for Continual Learning: An Architectural Perspective},
  author = {Aojun Lu and Tao Feng and Hangjie Yuan and Xiaotian Song and Yanan Sun},
  institution = {arXiv},
  url = {https://arxiv.org/abs/2404.14829},
  eprint = {2404.14829},
  eprinttype = {arXiv},
  year = {2024},
  date = {2024-01-01},
  urldate = {2024-01-01},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
2023
(Ed.)
Designing a New Search Space for Multivariate Time-Series Neural Architecture Search Collection
2023.
@collection{MacKinnon-ecmlw23a,
title = {Designing a New Search Space for Multivariate Time-Series Neural Architecture Search},
author = {Christopher MacKinnon and Robert Atkinson},
url = {https://books.google.de/books?hl=de&lr=&id=iFHqEAAAQBAJ&oi=fnd&pg=PA190&dq=%22neural+architecture+search%22&ots=CTD1RKtbFl&sig=vM1TNI0RduFF3A62Q04s-Ls7ZbM#v=onepage&q=%22neural%20architecture%20search%22&f=false},
year = {2023},
date = {2023-12-23},
urldate = {2023-12-23},
booktitle = {Advanced Analytics and Learning on Temporal Data: 8th ECML PKDD Workshop ...},
internal-note = {NOTE(review): booktitle is truncated ('...') -- restore the full proceedings title; the complete title is not recoverable from this file},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Zhao, Junbo; Ning, Xuefei; Liu, Enshu; Ru, Binxin; Zhou, Zixuan; Zhao, Tianchen; Chen, Chen; Zhang, Jiajin; Liao, Qingmin; Wang, Yu
Dynamic Ensemble of Low-fidelity Experts: Mitigating NAS “Cold-Start” Technical Report
2023.
@techreport{Zhao22,
  title = {Dynamic Ensemble of Low-fidelity Experts: Mitigating {NAS} ``Cold-Start''},
  author = {Junbo Zhao and Xuefei Ning and Enshu Liu and Binxin Ru and Zixuan Zhou and Tianchen Zhao and Chen Chen and Jiajin Zhang and Qingmin Liao and Yu Wang},
  url = {https://nicsefc.ee.tsinghua.edu.cn/nics_file/pdf/4208e529-772e-4977-be31-0b7cc4c7a9fc.pdf},
  year = {2023},
  date = {2023-12-20},
  urldate = {2023-12-20},
  keywords = {},
  pubstate = {published},
  tppubtype = {techreport}
}
Zhang, Baochang; Xu, Sheng; Lin, Mingbao; Wang, Tiancheng; Doermann, David
Binary Neural Networks: Algorithms, Architectures, and Applications Book
2023.
@book{ZhangBNN23a,
  title = {Binary Neural Networks: Algorithms, Architectures, and Applications},
  author = {Baochang Zhang and Sheng Xu and Mingbao Lin and Tiancheng Wang and David Doermann},
  url = {https://www.taylorfrancis.com/books/mono/10.1201/9781003376132/binary-neural-networks-baochang-zhang-sheng-xu-mingbao-lin-tiancheng-wang-david-doermann},
  year = {2023},
  date = {2023-12-13},
  urldate = {2023-12-13},
  keywords = {},
  pubstate = {published},
  tppubtype = {book}
}
(Ed.)
Multi-task Graph Neural Architecture Search with Task-aware Collaboration and Curriculum Collection
2023.
@collection{Qin-neurips23a,
  title = {Multi-task Graph Neural Architecture Search with Task-aware Collaboration and Curriculum},
  author = {Yijian Qin and Xin Wang and Ziwei Zhang and Hong Chen and Wenwu Zhu},
  url = {https://openreview.net/pdf?id=TOxpAwp0VE},
  year = {2023},
  date = {2023-12-01},
  urldate = {2023-12-01},
  booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
  keywords = {},
  pubstate = {published},
  tppubtype = {collection}
}
(Ed.)
Unsupervised Graph Neural Architecture Search with Disentangled Self-supervision Collection
2023.
@collection{Zhang-neurips23a,
  title = {Unsupervised Graph Neural Architecture Search with Disentangled Self-supervision},
  author = {Zeyang Zhang and Xin Wang and Ziwei Zhang and Guangyao Shen and Shiqi Shen and Wenwu Zhu},
  url = {https://openreview.net/pdf?id=UAFa5ZhR85},
  year = {2023},
  date = {2023-12-01},
  urldate = {2023-12-01},
  booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
  keywords = {},
  pubstate = {published},
  tppubtype = {collection}
}
(Ed.)
MathNAS: If Blocks Have a Role in Mathematical Architecture Design Collection
2023.
@collection{Wang-neurips23a,
  title     = {MathNAS: If Blocks Have a Role in Mathematical Architecture Design},
  author    = {Qinsi Wang and Jinghan Ke and Zhi Liang and Sihai Zhang},
  url       = {https://openreview.net/pdf?id=e1l4ZYprQH},
  year      = {2023},
  date      = {2023-12-01},
  urldate   = {2023-12-01},
  booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
(Ed.)
KD-Zero: Evolving Knowledge Distiller for Any Teacher-Student Pairs Collection
2023.
@collection{Li-neurips23a,
title = {KD-Zero: Evolving Knowledge Distiller for Any Teacher-Student Pairs},
author = {Lujun Li and Peijie Dong and Anggeng Li and Zimian Wei and Ya Yang},
url = {https://openreview.net/pdf?id=OlMKa5YZ8e},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
(Ed.)
Analyzing Generalization of Neural Networks through Loss Path Kernels Collection
2023.
@collection{Chen-neurips23a,
title = {Analyzing Generalization of Neural Networks through Loss Path Kernels},
author = {Yilan Chen and Wei Huang and Hao Wang and Charlotte Loh and Akash Srivastava and Lam M. Nguyen and Tsui-Wei Weng},
url = {https://openreview.net/pdf?id=8Ba7VJ7xiM},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
(Ed.)
Operation-Level Early Stopping for Robustifying Differentiable NAS Collection
2023.
@collection{Jiang-neurips23a,
title = {Operation-Level Early Stopping for Robustifying Differentiable NAS},
author = {Shen Jiang and Zipeng Ji and Guanghui Zhu and Chunfeng Yuan},
url = {https://openreview.net/pdf?id=yAOwkf4FyL},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
(Ed.)
AutoGO: Automated Computation Graph Optimization for Neural Network Evolution Collection
2023.
@collection{Salameh-neurips23a,
  title     = {AutoGO: Automated Computation Graph Optimization for Neural Network Evolution},
  author    = {Mohammad Salameh and Keith G. Mills and Negar Hassanpour and Fred X. Han and Shuting Zhang and Wei Lu and Shangling Jui and Chunhua Zhou and Fengyu Sun and Di Niu},
  url       = {https://openreview.net/pdf?id=lDI3ZuyzM9},
  year      = {2023},
  date      = {2023-12-01},
  urldate   = {2023-12-01},
  booktitle = {37th Conference on Neural Information Processing Systems (NeurIPS 2023)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
(Ed.)
Building A Mobile Text Recognizer via Truncated SVD-based Knowledge Distillation-Guided NAS Collection
2023.
@collection{Lin-bmvc2023,
  title     = {Building A Mobile Text Recognizer via Truncated SVD-based Knowledge Distillation-Guided NAS},
  author    = {Weifeng Lin and Canyu Xie and Dezhi Peng and Jiapeng Wang and Lianwen Jin and Wei Ding and Cong Yao and Mengchao He},
  url       = {https://papers.bmvc2023.org/0375.pdf},
  year      = {2023},
  date      = {2023-12-01},
  urldate   = {2023-12-01},
  booktitle = {The 34th British Machine Vision Conference},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Wang, Shang; Tang, Huangrong
GRADIENT-FREE PROXY FOR EFFICIENT LANGUAGE MODEL SEARCH Technical Report
2023.
@techreport{Wang-rg23a,
title = {Gradient-Free Proxy for Efficient Language Model Search},
author = {Shang Wang and Huangrong Tang},
url = {https://www.researchgate.net/profile/Shang-Wang-23/publication/376072656_GRADIENT-FREE_PROXY_FOR_EFFICIENT_LANGUAGE_MODEL_SEARCH/links/6568af28b1398a779dc7962b/GRADIENT-FREE-PROXY-FOR-EFFICIENT-LANGUAGE-MODEL-SEARCH.pdf},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hundhausen, Felix; Hubschneider, Simon; Asfour, Tamim
Grasping with Humanoid Hands based on In-Hand Vision and Hardware-accelerated CNNs Technical Report
2023.
@techreport{Hundhausen-23a,
title = {Grasping with Humanoid Hands based on In-Hand Vision and Hardware-accelerated CNNs},
author = {Felix Hundhausen and Simon Hubschneider and Tamim Asfour},
url = {https://h2t.iar.kit.edu/pdf/Hundhausen2023.pdf},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xue, Yu; Tong, Weinan; Neri, Ferrante; Chen, Peng; Luo, Tao; Zhen, Liangli; Wang, Xiao
Evolutionary Architecture Search for Generative Adversarial Networks Based On Weight Sharing Technical Report
2023.
@techreport{Xue-23a,
title = {Evolutionary Architecture Search for Generative Adversarial Networks Based On Weight Sharing},
author = {Yu Xue and Weinan Tong and Ferrante Neri and Peng Chen and Tao Luo and Liangli Zhen and Xiao Wang},
url = {https://s3.eu-central-1.amazonaws.com/eu-st01.ext.exlibrisgroup.com/44SUR_INST/storage/alma/6C/5F/BC/AE/64/94/51/5E/79/33/7C/E9/F1/86/8F/A6/TEVC_00119_2023_manuscript_R2.pdf?response-content-type=application%2Fpdf&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20231220T093633Z&X-Amz-SignedHeaders=host&X-Amz-Expires=119&X-Amz-Credential=AKIAJN6NPMNGJALPPWAQ%2F20231220%2Feu-central-1%2Fs3%2Faws4_request&X-Amz-Signature=ef6f0f810a9a364744dfb3260a0578af25980e9662f1017ace22a03fe7be693b},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
internal-note = {Pre-signed S3 URL (X-Amz-Expires=119) -- link has expired; replace with a stable DOI/publisher URL},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Al-Sabri, Raeed; Gao, Jianliang; Chen, Jiamin; Oloulade, Babatounde Moctard; Lyu, Tengfei
AutoTGRL: an automatic text-graph representation learning framework Journal Article
In: Neural Computing and Applications, 2023.
@article{Sabri-nca23a,
title = {AutoTGRL: an automatic text-graph representation learning framework},
author = {Raeed Al-Sabri and Jianliang Gao and Jiamin Chen and Babatounde Moctard Oloulade and Tengfei Lyu},
url = {https://link.springer.com/article/10.1007/s00521-023-09226-0},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
journal = {Neural Computing and Applications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sasnauskas, Paulius; Petkevičius, Linas
Symbolic Neural Architecture Search for Differential Equations Journal Article
In: 2023.
@article{SASNAUSKAS-ieeeaccess23a,
title = {Symbolic Neural Architecture Search for Differential Equations},
author = {Paulius Sasnauskas and Linas Petkevi{\v{c}}ius},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10354328&tag=1},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
journal = {IEEE Access},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Luo, Xiangzhong
Hardware-aware neural architecture search and compression towards embedded intelligence PhD Thesis
2023.
@phdthesis{LuoPHD23a,
title = {Hardware-aware neural architecture search and compression towards embedded intelligence},
author = {Luo, Xiangzhong},
url = {https://dr.ntu.edu.sg/handle/10356/172506},
school = {Nanyang Technological University},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Huai, Shuo
Enabling efficient edge intelligence: a hardware-software codesign approach PhD Thesis
2023.
@phdthesis{Huai-phd23a,
title = {Enabling efficient edge intelligence: a hardware-software codesign approach},
author = {Huai, Shuo},
url = {https://dr.ntu.edu.sg/handle/10356/172499},
school = {Nanyang Technological University},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
(Ed.)
Improving Natural Language Understanding with Computation-Efficient Retrieval Representation Fusion Collection
2023.
@collection{Wu-enlsp23a,
title = {Improving Natural Language Understanding with Computation-Efficient Retrieval Representation Fusion},
author = {Shangyu Wu and Ying Xiong and Yufei Cui and Xue Liu and Buzhou Tang and Tei-Wei Kuo and Chun Jason Xue},
url = {https://neurips2023-enlsp.github.io/papers/paper_79.pdf},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
booktitle = {The third version of the Efficient Natural Language and Speech Processing (ENLSP-III) workshop, NeurIPS2023},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Park, Soohyun; Son, Seok Bin; Lee, Youn Kyu; Jung, Soyi; Kim, Joongheon
Two-stage architectural fine-tuning for neural architecture search in efficient transfer learning Journal Article
In: Electronics Letters, vol. 59, no. 24, 2023.
@article{Park-el23a,
title = {Two-stage architectural fine-tuning for neural architecture search in efficient transfer learning},
author = {Soohyun Park and Seok Bin Son and Youn Kyu Lee and Soyi Jung and Joongheon Kim},
url = {https://ietresearch.onlinelibrary.wiley.com/doi/pdfdirect/10.1049/ell2.13066},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
journal = {Electronics Letters},
volume = {59},
number = {24},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Saluky, Saluky; Nugraha, Gusti Baskara; Supangkat, Suhono Harso
Enhancing Abandoned Object Detection with Dual Background Models and Yolo-NAS Journal Article
In: International Journal of Intelligent Systems and Applications in Engineering, vol. 12, no. 2, pp. 547–554, 2023.
@article{Saluky_Nugraha_Supangkat_2023,
title = {Enhancing Abandoned Object Detection with Dual Background Models and Yolo-NAS},
author = {Saluky Saluky and Gusti Baskara Nugraha and Suhono Harso Supangkat},
url = {https://ijisae.org/index.php/IJISAE/article/view/4298},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
journal = {International Journal of Intelligent Systems and Applications in Engineering},
volume = {12},
number = {2},
pages = {547--554},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bellodi, E.; Bertozzi, D.; Bizzarri, A.; Favalli, M.; Fraccaroli, M.; Zese, R.
Efficient Resource-Aware Neural Architecture Search with a Neuro-Symbolic Approach Proceedings Article
In: 2023 IEEE 16th International Symposium on Embedded Multicore/Many-core Systems-on-Chip (MCSoC), pp. 171-178, IEEE Computer Society, Los Alamitos, CA, USA, 2023.
@inproceedings{10387830,
title = {Efficient Resource-Aware Neural Architecture Search with a Neuro-Symbolic Approach},
author = {E. Bellodi and D. Bertozzi and A. Bizzarri and M. Favalli and M. Fraccaroli and R. Zese},
url = {https://doi.ieeecomputersociety.org/10.1109/MCSoC60832.2023.00034},
doi = {10.1109/MCSoC60832.2023.00034},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
booktitle = {2023 IEEE 16th International Symposium on Embedded Multicore/Many-core Systems-on-Chip (MCSoC)},
pages = {171--178},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Hardware-aware Neural Architectural Search (NAS) is gaining momentum to enable the deployment of deep learning on edge devices with limited computing capabilities. Incorporating device-related objectives such as affordable floating point operations, latency, power, memory usage, etc. into the optimization process makes searching for the most efficient neural architecture more complicated, since both model accuracy and hardware cost should guide the search. The main concern with most state-of-the-art hardware-aware NAS strategies is that they propose for evaluation also trivially infeasible network models for the capabilities of the hardware platform at hand. Moreover, previously generated models are frequently not exploited to intelligently generate new ones, leading to prohibitive computational costs for practical relevance. This paper aims to boost the computational efficiency of hardware-aware NAS by means of a neuro-symbolic framework revolving around a Probabilistic Inductive Logic Programming module to define and exploit a set of symbolic rules. This component learns and refines the probabilities associated with the rules, allowing the framework to adapt and improve over time, thus quickly narrowing down the search space toward the most promising neural architectures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kapoor, A.; Soans, R.; Dixit, S.; Ns, P.; Singh, B.; Das, M.
NASEREX: Optimizing Early Exits via AutoML for Scalable Efficient Inference in Big Image Streams Proceedings Article
In: 2023 IEEE International Conference on Big Data (BigData), pp. 5266-5271, IEEE Computer Society, Los Alamitos, CA, USA, 2023.
@inproceedings{10386502,
title = {NASEREX: Optimizing Early Exits via AutoML for Scalable Efficient Inference in Big Image Streams},
author = {A. Kapoor and R. Soans and S. Dixit and P. Ns and B. Singh and M. Das},
url = {https://doi.ieeecomputersociety.org/10.1109/BigData59044.2023.10386502},
doi = {10.1109/BigData59044.2023.10386502},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
booktitle = {2023 IEEE International Conference on Big Data (BigData)},
pages = {5266--5271},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {We investigate the problem of smart operational efficiency, at scale, in Machine Learning models for Big Data streams, in context of embedded AI applications, by learning optimal early exits. Embedded AI applications that employ deep neural models depend on efficient model inference at scale, especially on resource-constrained hardware. Recent vision/text/audio models are computationally complex with huge parameter spaces and input samples typically pass through multiple layers, each with large tensor computations, to produce valid outputs. Generally, in most real scenarios, AI applications deal with big data streams, such as streams of audio signals, static images and/or high resolution video frames. Deep ML models powering such applications have to continuously perform inference on such big data streams for varied tasks such as noise suppression, face detection, gait estimation and so on. Ensuring efficiency is challenging, even with model compression techniques since they reduce model size but often fail to achieve scalable inference efficiency over continuous streams. Early exits enable adaptive inference by extracting valid outputs from any pre-final layer of a deep model which significantly boosts efficiency at scale since many of the input instances need not be processed at all the layers of a deep model, especially for big streams. Suitable early exit structure design (number + positions) is a difficult but crucial aspect in improving efficiency without any loss in predictive performance, especially in context of big streams. Naive manual early exit design that does not consider the hardware capacity or data stream characteristics is counterproductive. We propose NASEREX framework that leverages Neural architecture Search (NAS) with a novel saliency-constrained search space and exit decision metric to learn suitable early exit structure to augment Deep Neural models for scalable efficient inference on big image streams. Optimized exit-augmented models perform {$\approx 2.5\times$} faster having {$\approx 4\times$} aggregated lower effective FLOPs, with no significant accuracy loss.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Heuillet, Alexandre
Exploring deep neural network differentiable architecture design PhD Thesis
Université Paris-Saclay, 2023.
@phdthesis{heuillet:tel-04420933,
  title   = {Exploring deep neural network differentiable architecture design},
  author  = {Alexandre Heuillet},
  url     = {https://hal.science/tel-04420933},
  year    = {2023},
  date    = {2023-12-01},
  urldate = {2023-12-01},
  number  = {2023UPASG069},
  school  = {Université Paris-Saclay},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Chen, Hui; Li, Nannan; Chen, Rong
Ni-DehazeNet: representation learning via bilevel optimized architecture search for nighttime dehazing Journal Article
In: The Visual Computer, 2023.
@article{Chen-vc23a,
title = {Ni-DehazeNet: representation learning via bilevel optimized architecture search for nighttime dehazing},
author = {Hui Chen and Nannan Li and Rong Chen},
url = {https://link.springer.com/article/10.1007/s00371-023-03159-4},
year = {2023},
date = {2023-11-28},
urldate = {2023-11-28},
journal = {The Visual Computer},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lyu, Zonglei; Yu, Tong; Pan, Fuxi; Zhang, Yilin; Luo, Jia; Zhang, Dan; Chen, Yiren; Zhang, Bo; Li, Guangyao
A survey of model compression strategies for object detection Journal Article
In: Multimedia Tools and Applications , 2023.
@article{Lyu-mtap23a,
title = {A survey of model compression strategies for object detection},
author = {Zonglei Lyu and Tong Yu and Fuxi Pan and Yilin Zhang and Jia Luo and Dan Zhang and Yiren Chen and Bo Zhang and Guangyao Li},
url = {https://link.springer.com/article/10.1007/s11042-023-17192-x},
year = {2023},
date = {2023-11-02},
urldate = {2023-11-02},
journal = {Multimedia Tools and Applications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Poyser, Matthew
2023.
@phdthesis{POYSER-phd2023,
title = {Minimizing Computational Resources for Deep Machine Learning: A Compression and Neural Architecture Search Perspective for Image Classification and Object Detection},
author = {Poyser, Matthew},
url = {http://etheses.dur.ac.uk/15207/1/main.pdf},
school = {Durham University},
year = {2023},
date = {2023-11-01},
urldate = {2023-11-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Liu, Shiya
Energy-efficient Neuromorphic Computing for Resource-constrained Internet of Things Devices PhD Thesis
2023.
@phdthesis{Liu-phd23a,
title = {Energy-efficient Neuromorphic Computing for Resource-constrained Internet of Things Devices},
author = {Liu, Shiya},
url = {https://vtechworks.lib.vt.edu/handle/10919/116629},
school = {Virginia Polytechnic Institute and State University},
year = {2023},
date = {2023-11-01},
urldate = {2023-11-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}