Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2022
Ingolfsson, Thorir Mar; Vero, Mark; Wang, Xiaying; Lamberti, Lorenzo; Benini, Luca; Spallanzani, Matteo
Reducing neural architecture search spaces with training-free statistics and computational graph clustering Proceedings Article
In: Proceedings of the 19th ACM International Conference on Computing Frontiers, pp. 213–214, 2022.
@inproceedings{ingolfsson2022reducing,
  title     = {Reducing neural architecture search spaces with training-free statistics and computational graph clustering},
  author    = {Thorir Mar Ingolfsson and Mark Vero and Xiaying Wang and Lorenzo Lamberti and Luca Benini and Matteo Spallanzani},
  booktitle = {Proceedings of the 19th ACM International Conference on Computing Frontiers},
  pages     = {213--214},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  url       = {https://arxiv.org/pdf/2204.14103.pdf},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Le, Cat P.; Soltani, Mohammadreza; Dong, Juncheng; Tarokh, Vahid
Fisher Task Distance and its Application in Neural Architecture Search Journal Article
In: IEEE Access, vol. 10, pp. 47235-47249, 2022.
@article{9766163,
title = {Fisher Task Distance and its Application in Neural Architecture Search},
author = {Cat P. Le and Mohammadreza Soltani and Juncheng Dong and Vahid Tarokh},
url = {https://ieeexplore.ieee.org/abstract/document/9766163},
doi = {10.1109/ACCESS.2022.3171741},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Access},
volume = {10},
pages = {47235--47249},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Akin, Berkin; Gupta, Suyog; Long, Yun; Spiridonov, Anton; Wang, Zhuo; White, Marie; Xu, Hao; Zhou, Ping; Zhou, Yanqi
Searching for Efficient Neural Architectures for On-Device ML on Edge TPUs Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2204-14007,
title = {Searching for Efficient Neural Architectures for On-Device ML on Edge TPUs},
author = {Berkin Akin and Suyog Gupta and Yun Long and Anton Spiridonov and Zhuo Wang and Marie White and Hao Xu and Ping Zhou and Yanqi Zhou},
url = {https://doi.org/10.48550/arXiv.2204.14007},
doi = {10.48550/arXiv.2204.14007},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2204.14007},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Linnan; Yu, Chenhan; Salian, Satish; Kierat, Slawomir; Migacz, Szymon; Fit-Florea, Alex
Searching the Deployable Convolution Neural Networks for GPUs Proceedings Article
In: CVPR 2022, 2022.
@inproceedings{DBLP:journals/corr/abs-2205-00841,
title = {Searching the Deployable Convolution Neural Networks for GPUs},
author = {Linnan Wang and Chenhan Yu and Satish Salian and Slawomir Kierat and Szymon Migacz and Alex Fit-Florea},
url = {https://openaccess.thecvf.com/content/CVPR2022/papers/Wang_Searching_the_Deployable_Convolution_Neural_Networks_for_GPUs_CVPR_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {CVPR 2022},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kawa, Sajad Ahmad; Wani, M Arif
Designing Convolution Neural Network Architecture by utilizing the Complexity Model of the Dataset Proceedings Article
In: 2022 9th International Conference on Computing for Sustainable Global Development (INDIACom), pp. 221-225, 2022.
@inproceedings{9763256,
title = {Designing Convolution Neural Network Architecture by utilizing the Complexity Model of the Dataset},
author = {Sajad Ahmad Kawa and M Arif Wani},
url = {https://ieeexplore.ieee.org/abstract/document/9763256},
doi = {10.23919/INDIACom54597.2022.9763256},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 9th International Conference on Computing for Sustainable Global Development (INDIACom)},
pages = {221--225},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dong, Zhen; Zhou, Kaicheng; Li, Guohao; Zhou, Qiang; Guo, Mingfei; Ghanem, Bernard; Keutzer, Kurt; Zhang, Shanghang
UnrealNAS: Can We Search Neural Architectures with Unreal Data? Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-02162,
title = {UnrealNAS: Can We Search Neural Architectures with Unreal Data?},
author = {Zhen Dong and Kaicheng Zhou and Guohao Li and Qiang Zhou and Mingfei Guo and Bernard Ghanem and Kurt Keutzer and Shanghang Zhang},
url = {https://doi.org/10.48550/arXiv.2205.02162},
doi = {10.48550/arXiv.2205.02162},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.02162},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Pourchot, Aloïs; Bailly, Kévin; Ducarouge, Alexis; Sigaud, Olivier
An extensive appraisal of weight-sharing on the NAS-Bench-101 benchmark Journal Article
In: Neurocomputing, vol. 498, pp. 28-42, 2022, ISSN: 0925-2312.
@article{POURCHOT202228,
title = {An extensive appraisal of weight-sharing on the NAS-Bench-101 benchmark},
author = {Aloïs Pourchot and Kévin Bailly and Alexis Ducarouge and Olivier Sigaud},
url = {https://www.sciencedirect.com/science/article/pii/S092523122200501X},
doi = {10.1016/j.neucom.2022.04.108},
issn = {0925-2312},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Neurocomputing},
volume = {498},
pages = {28--42},
abstract = {Weight-sharing (WS) has recently emerged as a paradigm to accelerate the automated search for efficient neural architectures, a process dubbed Neural Architecture Search (NAS). By using and training the same set of weights for the whole search space, WS allows for the quick evaluation of millions of architectures, where classical NAS approaches require lengthy individual trainings. Although very appealing, WS is not without drawbacks and several works have started to question its capabilities on small hand-crafted benchmarks. In this paper, we take advantage of the NAS-Bench-101 dataset to challenge the efficiency of a uniform-sampling based WS variant on several representative search spaces. After reviewing previous studies on WS and highlighting several of their shortcomings, we introduce our own experimental setup, from which we extract several good practices that one should keep in mind when evaluating WS. With our experiments we first establish that, given the correct evaluation procedure, WS is able to produce accuracy scores decently correlated with standalone ones. We then provide evidence that on some search spaces, this WS variant is able to rapidly find better than random architectures, whilst it is equivalent or sometimes even worse than a baseline random search on others, as we find that given the same budget, the probability of superiority of an architecture found using WS over an architecture found through random search can vary between 7% and 78% depending on the search space. We present evidence that the search space itself has an intricate effect on the capabilities of WS and can bias weight-sharing towards certain architectural patterns with no clear accuracy advantage. We conclude that the impact of WS is heavily search-space dependent and difficult to anticipate for a given problem.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jin, Charles; Phothilimthana, Phitchaya Mangpo; Roy, Sudip
(alpha)NAS: Neural Architecture Search using Property Guided Synthesis Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-03960,
title = {(alpha)NAS: Neural Architecture Search using Property Guided Synthesis},
author = {Charles Jin and Phitchaya Mangpo Phothilimthana and Sudip Roy},
url = {https://doi.org/10.48550/arXiv.2205.03960},
doi = {10.48550/arXiv.2205.03960},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.03960},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Ting-Ting; Chu, Shu-Chuan; Hu, Chia-Cheng; Jia, Han-Dong; Pan, Jeng-Shyang
Efficient Network Architecture Search Using Hybrid Optimizer Journal Article
In: Entropy, vol. 24, no. 5, 2022, ISSN: 1099-4300.
@article{e24050656,
  title     = {Efficient Network Architecture Search Using Hybrid Optimizer},
  author    = {Ting-Ting Wang and Shu-Chuan Chu and Chia-Cheng Hu and Han-Dong Jia and Jeng-Shyang Pan},
  journal   = {Entropy},
  volume    = {24},
  number    = {5},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  url       = {https://www.mdpi.com/1099-4300/24/5/656},
  doi       = {10.3390/e24050656},
  issn      = {1099-4300},
  abstract  = {Manually designing a convolutional neural network (CNN) is an important deep learning method for solving the problem of image classification. However, most of the existing CNN structure designs consume a significant amount of time and computing resources. Over the years, the demand for neural architecture search (NAS) methods has been on the rise. Therefore, we propose a novel deep architecture generation model based on Aquila optimization (AO) and a genetic algorithm (GA). The main contributions of this paper are as follows: Firstly, a new encoding strategy representing the CNN coding structure is proposed, so that the evolutionary computing algorithm can be combined with CNN. Secondly, a new mechanism for updating location is proposed, which incorporates three typical operators from GA cleverly into the model we have designed so that the model can find the optimal solution in the limited search space. Thirdly, the proposed method can deal with the variable-length CNN structure by adding skip connections. Fourthly, combining traditional CNN layers and residual blocks and introducing a grouping strategy provides greater possibilities for searching for the optimal CNN structure. Additionally, we use two notable datasets, consisting of the MNIST and CIFAR-10 datasets for model evaluation. The experimental results show that our proposed model has good results in terms of search accuracy and time.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Boyang; Lu, Qing; Jiang, Weiwen; Jung, Taeho; Shi, Yiyu
A collaboration strategy in the mining pool for proof-of-neural-architecture consensus Journal Article
In: Blockchain: Research and Applications, pp. 100089, 2022, ISSN: 2096-7209.
@article{LI2022100089,
title = {A collaboration strategy in the mining pool for proof-of-neural-architecture consensus},
author = {Boyang Li and Qing Lu and Weiwen Jiang and Taeho Jung and Yiyu Shi},
url = {https://www.sciencedirect.com/science/article/pii/S2096720922000306},
doi = {10.1016/j.bcra.2022.100089},
issn = {2096-7209},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Blockchain: Research and Applications},
pages = {100089},
abstract = {In most popular public accessible cryptocurrency systems, the mining pool plays a key role because mining cryptocurrency with the mining pool turns the non-profitable situation into profitable for individual miners. In many recent novel blockchain consensuses, the deep learning training procedure becomes the task for miners to prove their workload, thus the computation power of miners will not purely be spent on the hash puzzle. In this way, the hardware and energy will support the blockchain service and deep learning training simultaneously. While the incentive of miners is to earn tokens, individual miners are motivated to join mining pools to become more competitive. In this paper, we are the first to demonstrate a mining pool solution for novel consensuses based on deep learning. The mining pool manager partitions the full searching space into subspaces and all miners are scheduled to collaborate on the Neural Architecture Search (NAS) tasks in the assigned subspace. Experiments demonstrate that the performance of this type of mining pool is more competitive than an individual miner. Due to the uncertainty of miners’ behaviors, the mining pool manager checks the standard deviation of the performance of high reward miners and prepares backup miners to ensure completion of the tasks of high reward miners.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zheng, Chenyu; Wang, Junjue; Ma, Ailong; Zhong, Yanfei
AutoLC: Search Lightweight and Top-Performing Architecture for Remote Sensing Image Land-Cover Classification Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-05369,
title = {AutoLC: Search Lightweight and Top-Performing Architecture for Remote Sensing Image Land-Cover Classification},
author = {Chenyu Zheng and Junjue Wang and Ailong Ma and Yanfei Zhong},
url = {https://doi.org/10.48550/arXiv.2205.05369},
doi = {10.48550/arXiv.2205.05369},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.05369},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Basha, S. H. Shabbeer; Tula, Debapriya; Vinakota, Sravan Kumar; Dubey, Shiv Ram
Target Aware Network Architecture Search and Compression for Efficient Knowledge Transfer Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-05967,
title = {Target Aware Network Architecture Search and Compression for Efficient Knowledge Transfer},
author = {S. H. Shabbeer Basha and Debapriya Tula and Sravan Kumar Vinakota and Shiv Ram Dubey},
url = {https://doi.org/10.48550/arXiv.2205.05967},
doi = {10.48550/arXiv.2205.05967},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.05967},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ren, Yankun; Li, Longfei; Yang, Xinxing; Zhou, Jun
AutoTransformer: Automatic Transformer Architecture Design For Time Series Classification Proceedings Article
In: Advances in Knowledge Discovery and Data Mining: 26th Pacific-Asia Conference, PAKDD 2022, Chengdu, China, May 16–19, 2022, Proceedings, Part I, pp. 143–155, Springer-Verlag, Chengdu, China, 2022, ISBN: 978-3-031-05932-2.
@inproceedings{10.1007/978-3-031-05933-9_12,
title = {AutoTransformer: Automatic Transformer Architecture Design For Time Series Classification},
author = {Yankun Ren and Longfei Li and Xinxing Yang and Jun Zhou},
url = {https://doi.org/10.1007/978-3-031-05933-9_12},
doi = {10.1007/978-3-031-05933-9_12},
isbn = {978-3-031-05932-2},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Advances in Knowledge Discovery and Data Mining: 26th Pacific-Asia Conference, PAKDD 2022, Chengdu, China, May 16–19, 2022, Proceedings, Part I},
pages = {143--155},
publisher = {Springer-Verlag},
address = {Chengdu, China},
abstract = {Time series classification (TSC) aims to assign labels to time series. Deep learning methods, such as InceptionTime and Transformer, achieve promising performances in TSC. Although deep learning methods do not require manually crafted features, they do require careful manual design of the network structure. The design of architectures heavily relies on researchers’ prior knowledge and experience. Due to the limitations of human’s knowledge, the designed architecture may not be optimal on the dataset of interest. To automate and optimize the architecture design, we propose a data-driven TSC network architecture design method called AutoTransformer. AutoTransformer designs the suitable network architecture automatically depending on the target TSC dataset. Inspired by the overall architecture of Transformer, we first propose a novel search space tailored for TSC. The search space includes a variety of substructures that are capable of extracting global and local features from time series. Then, with the help of neural architecture search (NAS) technique, a suitable network architecture for the target TSC dataset can be found from the search space. Experimental results show that AutoTransformer finds proper architectures on different TSC datasets and outperforms state-of-the-art methods on the UCR archive. Ablation studies verify the effectiveness of the proposed search space.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Deng, Difan; Karl, Florian; Hutter, Frank; Bischl, Bernd; Lindauer, Marius
Efficient Automated Deep Learning for Time Series Forecasting Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-05511,
title = {Efficient Automated Deep Learning for Time Series Forecasting},
author = {Difan Deng and Florian Karl and Frank Hutter and Bernd Bischl and Marius Lindauer},
url = {https://doi.org/10.48550/arXiv.2205.05511},
doi = {10.48550/arXiv.2205.05511},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.05511},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Du, Mengge; Chen, Yuntian; Zhang, Dongxiao
AutoKE: An automatic knowledge embedding framework for scientific machine learning Journal Article
In: CoRR, vol. abs/2205.05390, 2022.
@article{DBLP:journals/corr/abs-2205-05390,
  title     = {AutoKE: An automatic knowledge embedding framework for scientific machine learning},
  author    = {Mengge Du and Yuntian Chen and Dongxiao Zhang},
  journal   = {CoRR},
  volume    = {abs/2205.05390},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  url       = {https://doi.org/10.48550/arXiv.2205.05390},
  doi       = {10.48550/arXiv.2205.05390},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kim, Do-Guk; Lee, Heung-Chang
Proxyless Neural Architecture Adaptation for Supervised Learning and Self-Supervised Learning Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-07168,
title = {Proxyless Neural Architecture Adaptation for Supervised Learning and Self-Supervised Learning},
author = {Do-Guk Kim and Heung-Chang Lee},
url = {https://doi.org/10.48550/arXiv.2205.07168},
doi = {10.48550/arXiv.2205.07168},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.07168},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Sun, Jialiang; Zheng, Xiaohu; Yao, Wen; Zhang, Xiaoya; Zhou, Weien
Heat Source Layout Optimization Using Automatic Deep Learning Surrogate Model and Multimodal Neighborhood Search Algorithm Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-07812,
title = {Heat Source Layout Optimization Using Automatic Deep Learning Surrogate Model and Multimodal Neighborhood Search Algorithm},
author = {Jialiang Sun and Xiaohu Zheng and Wen Yao and Xiaoya Zhang and Weien Zhou},
url = {https://doi.org/10.48550/arXiv.2205.07812},
doi = {10.48550/arXiv.2205.07812},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.07812},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Belciug, Smaranda
Learning deep neural networks' architectures using differential evolution. Case study: Medical imaging processing Journal Article
In: Computers in Biology and Medicine, vol. 146, pp. 105623, 2022, ISSN: 0010-4825.
@article{BELCIUG2022105623,
title = {Learning deep neural networks' architectures using differential evolution. Case study: Medical imaging processing},
author = {Smaranda Belciug},
url = {https://www.sciencedirect.com/science/article/pii/S0010482522004152},
doi = {10.1016/j.compbiomed.2022.105623},
issn = {0010-4825},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Computers in Biology and Medicine},
volume = {146},
pages = {105623},
abstract = {The COVID-19 pandemic has changed the way we practice medicine. Cancer patient and obstetric care landscapes have been distorted. Delaying cancer diagnosis or maternal-fetal monitoring increased the number of preventable deaths or pregnancy complications. One solution is using Artificial Intelligence to help the medical personnel establish the diagnosis in a faster and more accurate manner. Deep learning is the state-of-the-art solution for image classification. Researchers manually design the structure of fix deep learning neural networks structures and afterwards verify their performance. The goal of this paper is to propose a potential method for learning deep network architectures automatically. As the number of networks architectures increases exponentially with the number of convolutional layers in the network, we propose a differential evolution algorithm to traverse the search space. At first, we propose a way to encode the network structure as a candidate solution of fixed-length integer array, followed by the initialization of differential evolution method. A set of random individuals is generated, followed by mutation, recombination, and selection. At each generation the individuals with the poorest loss values are eliminated and replaced with more competitive individuals. The model has been tested on three cancer datasets containing MRI scans and histopathological images and two maternal-fetal screening ultrasound images. The novel proposed method has been compared and statistically benchmarked to four state-of-the-art deep learning networks: VGG16, ResNet50, Inception V3, and DenseNet169. The experimental results showed that the model is competitive to other state-of-the-art models, obtaining accuracies between 78.73% and 99.50% depending on the dataset it had been applied on.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Frey, Nathan; Soklaski, Ryan; Axelrod, Simon; Samsi, Siddharth; Gomez-Bombarelli, Rafael; Coley, Connor; Gadepally, Vijay
Neural Scaling of Deep Chemical Models Journal Article
In: ChemRxiv, 2022.
@article{frey_soklaski_axelrod_samsi_gomez-bombarelli_coley_gadepally_2022,
  title     = {Neural Scaling of Deep Chemical Models},
  author    = {Nathan Frey and Ryan Soklaski and Simon Axelrod and Siddharth Samsi and Rafael Gomez-Bombarelli and Connor Coley and Vijay Gadepally},
  journal   = {ChemRxiv},
  publisher = {Cambridge Open Engage},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  doi       = {10.26434/chemrxiv-2022-3s512},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Miriyala, Srinivas Soumitri; Pujari, Keerthi NagaSree; Naik, Sakshi; Mitra, Kishalay
Evolutionary neural architecture search for surrogate models to enable optimization of industrial continuous crystallization process Journal Article
In: Powder Technology, vol. 405, pp. 117527, 2022, ISSN: 0032-5910.
@article{MIRIYALA2022117527,
title = {Evolutionary neural architecture search for surrogate models to enable optimization of industrial continuous crystallization process},
author = {Srinivas Soumitri Miriyala and Keerthi NagaSree Pujari and Sakshi Naik and Kishalay Mitra},
url = {https://www.sciencedirect.com/science/article/pii/S0032591022004211},
doi = {10.1016/j.powtec.2022.117527},
issn = {0032-5910},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Powder Technology},
volume = {405},
pages = {117527},
abstract = {Optimal performance of the crystallization process is of utmost importance for industries handling bulk commodity chemicals to pharmaceuticals. Such an optimization exercise becomes extremely time expensive as the mathematical models mimicking such complex processes involve the solution of Integro-Differential Population Balance Equations using High Resolution Finite Volume Methods. In order to build a fast and robust data based alternative model, a surrogate assisted approach using Artificial Neural Networks has been proposed here. To overcome the heuristics-based estimation of the hyper-parameters in ANNs, we aim to contribute a novel Neural Architecture Search strategy for the auto-tuning of hyper-parameters integrated with sample size determination techniques. While solving a multi-objective optimization of crystallization process ensuring maximum productivity, the results from surrogates are compared with those of a high-fidelity physics driven model, which reports five order of magnitude speed improvement without sacrificing much on accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lee, Joo-Hyun; Chang, Joon-Hyuk; Yang, Jae-Mo; Moon, Han-Gil
NAS-TasNet: Neural Architecture Search for Time-Domain Speech Separation Journal Article
In: IEEE Access, vol. 10, pp. 56031-56043, 2022.
@article{9777717,
title = {NAS-TasNet: Neural Architecture Search for Time-Domain Speech Separation},
author = {Joo-Hyun Lee and Joon-Hyuk Chang and Jae-Mo Yang and Han-Gil Moon},
url = {https://ieeexplore.ieee.org/abstract/document/9777717},
doi = {10.1109/ACCESS.2022.3176003},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Access},
volume = {10},
pages = {56031--56043},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Qian, Qi; Sang, Qingbing
No-reference image quality assessment based on automatic machine learning Journal Article
In: ITM Web Conf., vol. 45, pp. 01034, 2022.
@article{refId0b,
  title     = {No-reference image quality assessment based on automatic machine learning},
  author    = {Qi Qian and Qingbing Sang},
  journal   = {ITM Web Conf.},
  volume    = {45},
  pages     = {01034},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  url       = {https://doi.org/10.1051/itmconf/20224501034},
  doi       = {10.1051/itmconf/20224501034},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ogundokun, Roseline Oluwaseun; Misra, Sanjay; Douglas, Mychal; Damaševičius, Robertas; Maskeliūnas, Rytis
Medical Internet-of-Things Based Breast Cancer Diagnosis Using Hyperparameter-Optimized Neural Networks Journal Article
In: Future Internet, vol. 14, no. 5, 2022, ISSN: 1999-5903.
@article{fi14050153,
  title     = {Medical Internet-of-Things Based Breast Cancer Diagnosis Using Hyperparameter-Optimized Neural Networks},
  author    = {Roseline Oluwaseun Ogundokun and Sanjay Misra and Mychal Douglas and Robertas Damaševičius and Rytis Maskeliūnas},
  journal   = {Future Internet},
  volume    = {14},
  number    = {5},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  url       = {https://www.mdpi.com/1999-5903/14/5/153},
  doi       = {10.3390/fi14050153},
  issn      = {1999-5903},
  abstract  = {In today’s healthcare setting, the accurate and timely diagnosis of breast cancer is critical for recovery and treatment in the early stages. In recent years, the Internet of Things (IoT) has experienced a transformation that allows the analysis of real-time and historical data using artificial intelligence (AI) and machine learning (ML) approaches. Medical IoT combines medical devices and AI applications with healthcare infrastructure to support medical diagnostics. The current state-of-the-art approach fails to diagnose breast cancer in its initial period, resulting in the death of most women. As a result, medical professionals and researchers are faced with a tremendous problem in early breast cancer detection. We propose a medical IoT-based diagnostic system that competently identifies malignant and benign people in an IoT environment to resolve the difficulty of identifying early-stage breast cancer. The artificial neural network (ANN) and convolutional neural network (CNN) with hyperparameter optimization are used for malignant vs. benign classification, while the Support Vector Machine (SVM) and Multilayer Perceptron (MLP) were utilized as baseline classifiers for comparison. Hyperparameters are important for machine learning algorithms since they directly control the behaviors of training algorithms and have a significant effect on the performance of machine learning models. We employ a particle swarm optimization (PSO) feature selection approach to select more satisfactory features from the breast cancer dataset to enhance the classification performance using MLP and SVM, while grid-based search was used to find the best combination of the hyperparameters of the CNN and ANN models. The Wisconsin Diagnostic Breast Cancer (WDBC) dataset was used to test the proposed approach. The proposed model got a classification accuracy of 98.5% using CNN, and 99.2% using ANN.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Xu, Ship Peng; Wang, Ke; Hassan, Md. Rafiul; Hassan, Mohammad Mehedi; Chen, Chien-Ming
An Interpretive Perspective: Adversarial Trojaning Attack on Neural-Architecture-Search Enabled Edge AI Systems Journal Article
In: IEEE Transactions on Industrial Informatics, pp. 1-1, 2022.
@article{9780600,
title = {An Interpretive Perspective: Adversarial Trojaning Attack on Neural-Architecture-Search Enabled Edge AI Systems},
author = {Ship Peng Xu and Ke Wang and Md. Rafiul Hassan and Mohammad Mehedi Hassan and Chien-Ming Chen},
url = {https://ieeexplore.ieee.org/abstract/document/9780600},
doi = {10.1109/TII.2022.3177442},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Industrial Informatics},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cummings, Daniel; Sarah, Anthony; Sridhar, Sharath Nittur; Szankin, Maciej; Muñoz, Juan Pablo; Sundaresan, Sairam
A Hardware-Aware Framework for Accelerating Neural Architecture Search Across Modalities Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-10358,
title = {A Hardware-Aware Framework for Accelerating Neural Architecture Search Across Modalities},
author = {Daniel Cummings and Anthony Sarah and Sharath Nittur Sridhar and Maciej Szankin and Juan Pablo Muñoz and Sairam Sundaresan},
url = {https://doi.org/10.48550/arXiv.2205.10358},
doi = {10.48550/arXiv.2205.10358},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.10358},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Yanyu; Zhao, Pu; Yuan, Geng; Lin, Xue; Wang, Yanzhi; Chen, Xin
Pruning-as-Search: Efficient Neural Architecture Search via Channel Pruning and Structural Reparameterization Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01198,
title = {Pruning-as-Search: Efficient Neural Architecture Search via Channel Pruning and Structural Reparameterization},
author = {Yanyu Li and Pu Zhao and Geng Yuan and Xue Lin and Yanzhi Wang and Xin Chen},
url = {https://doi.org/10.48550/arXiv.2206.01198},
doi = {10.48550/arXiv.2206.01198},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2206.01198},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Smith, James Seale; Seymour, Zachary; Chiu, Han-Pang
Incremental Learning with Differentiable Architecture and Forgetting Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-09875,
title = {Incremental Learning with Differentiable Architecture and Forgetting Search},
author = {James Seale Smith and Zachary Seymour and Han-Pang Chiu},
url = {https://doi.org/10.48550/arXiv.2205.09875},
doi = {10.48550/arXiv.2205.09875},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
institution = {arXiv},
number = {abs/2205.09875},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Tuli, Shikhar; Dedhia, Bhishma; Tuli, Shreshth; Jha, Niraj K.
FlexiBERT: Are Current Transformer Architectures too Homogeneous and Rigid? Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-11656,
title = {{FlexiBERT}: Are Current Transformer Architectures too Homogeneous and Rigid?},
author = {Shikhar Tuli and Bhishma Dedhia and Shreshth Tuli and Niraj K. Jha},
url = {https://doi.org/10.48550/arXiv.2205.11656},
doi = {10.48550/arXiv.2205.11656},
eprint = {2205.11656},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2205.11656},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Risso, Matteo; Burrello, Alessio; Conti, Francesco; Lamberti, Lorenzo; Chen, Yukai; Benini, Luca; Macii, Enrico; Poncino, Massimo; Pagliari, Daniele Jahier
Lightweight Neural Architecture Search for Temporal Convolutional Networks at the Edge Journal Article
In: IEEE Transactions on Computers, pp. 1-1, 2022.
@article{9782512,
title = {Lightweight Neural Architecture Search for Temporal Convolutional Networks at the Edge},
author = {Matteo Risso and Alessio Burrello and Francesco Conti and Lorenzo Lamberti and Yukai Chen and Luca Benini and Enrico Macii and Massimo Poncino and Daniele Jahier Pagliari},
url = {https://ieeexplore.ieee.org/abstract/document/9782512},
doi = {10.1109/TC.2022.3177955},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {{IEEE} Transactions on Computers},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gao, Yang; Zhang, Peng; Yang, Hong; Zhou, Chuan; Tian, Zhihong; Hu, Yue; Li, Zhao; Zhou, Jingren
GraphNAS++: Distributed Architecture Search for Graph Neural Networks Journal Article
In: IEEE Transactions on Knowledge and Data Engineering, pp. 1-1, 2022.
@article{9782531,
title = {{GraphNAS++}: Distributed Architecture Search for Graph Neural Networks},
author = {Yang Gao and Peng Zhang and Hong Yang and Chuan Zhou and Zhihong Tian and Yue Hu and Zhao Li and Jingren Zhou},
url = {https://ieeexplore.ieee.org/abstract/document/9782531},
doi = {10.1109/TKDE.2022.3178153},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {{IEEE} Transactions on Knowledge and Data Engineering},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Zeyang; Chen, Yidong; Zhou, Changle
Self-Growing Binary Activation Network: A Novel Deep Learning Model With Dynamic Architecture Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, pp. 1-10, 2022.
@article{9783448,
title = {Self-Growing Binary Activation Network: A Novel Deep Learning Model With Dynamic Architecture},
author = {Zeyang Zhang and Yidong Chen and Changle Zhou},
url = {https://ieeexplore.ieee.org/abstract/document/9783448},
doi = {10.1109/TNNLS.2022.3176027},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {{IEEE} Transactions on Neural Networks and Learning Systems},
pages = {1--10},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Abbasi, Saad; Wong, Alexander; Shafiee, Mohammad Javad
MAPLE-X: Latency Prediction with Explicit Microprocessor Prior Knowledge Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-12660,
title = {{MAPLE-X}: Latency Prediction with Explicit Microprocessor Prior Knowledge},
author = {Saad Abbasi and Alexander Wong and Mohammad Javad Shafiee},
url = {https://doi.org/10.48550/arXiv.2205.12660},
doi = {10.48550/arXiv.2205.12660},
eprint = {2205.12660},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2205.12660},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Paulo, Maurício; Turnes, Javier Noa; Happ, P.; Ferreira, Matheus; Marques, Haroldo; Feitosa, Raul
How Far Should I Look? A Neural Architecture Search Strategy for Semantic Segmentation of Remote Sensing Images Journal Article
In: ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, vol. V-3-2022, pp. 17-23, 2022.
@article{MauricioISPRS2022,
title = {How Far Should {I} Look? {A} Neural Architecture Search Strategy for Semantic Segmentation of Remote Sensing Images},
author = {Maurício Paulo and Javier Noa Turnes and P. Happ and Matheus Ferreira and Haroldo Marques and Raul Feitosa},
doi = {10.5194/isprs-annals-V-3-2022-17-2022},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
volume = {V-3-2022},
pages = {17--23},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lopes, Vasco; Santos, Miguel; Degardin, Bruno; Alexandre, Luís A.
Efficient Guided Evolution for Neural Architecture Search Proceedings Article
In: GECCO2022, 2022.
@inproceedings{LopesGECCO2022,
title = {Efficient Guided Evolution for Neural Architecture Search},
author = {Vasco Lopes and Miguel Santos and Bruno Degardin and Luís A. Alexandre},
url = {http://www.di.ubi.pt/~lfbaa/pubs/GECCO2022.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference ({GECCO} 2022)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Yicheng; Han, Xiaotian; Chang, Chia-Yuan; Zha, Daochen; Braga-Neto, Ulisses; Hu, Xia
Auto-PINN: Understanding and Optimizing Physics-Informed Neural Architecture Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-13748,
title = {{Auto-PINN}: Understanding and Optimizing Physics-Informed Neural Architecture},
author = {Yicheng Wang and Xiaotian Han and Chia-Yuan Chang and Daochen Zha and Ulisses Braga-Neto and Xia Hu},
url = {https://doi.org/10.48550/arXiv.2205.13748},
doi = {10.48550/arXiv.2205.13748},
eprint = {2205.13748},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2205.13748},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Yanyu; Zhao, Pu; Yuan, Geng; Lin, Xue; Wang, Yanzhi; Chen, Xin
Pruning-as-Search: Efficient Neural Architecture Search via Channel Pruning and Structural Reparameterization Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01198b,
title = {Pruning-as-Search: Efficient Neural Architecture Search via Channel Pruning and Structural Reparameterization},
author = {Yanyu Li and Pu Zhao and Geng Yuan and Xue Lin and Yanzhi Wang and Xin Chen},
url = {https://doi.org/10.48550/arXiv.2206.01198},
doi = {10.48550/arXiv.2206.01198},
eprint = {2206.01198},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.01198},
keywords = {},
pubstate = {published},
tppubtype = {techreport},
internal-note = {duplicate of DBLP:journals/corr/abs-2206-01198 -- consider removing one entry}
}
Risso, Matteo; Burrello, Alessio; Benini, Luca; Macii, Enrico; Poncino, Massimo; Pagliari, Daniele Jahier
Multi-Complexity-Loss DNAS for Energy-Efficient and Memory-Constrained Deep Neural Networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-00302,
title = {Multi-Complexity-Loss {DNAS} for Energy-Efficient and Memory-Constrained Deep Neural Networks},
author = {Matteo Risso and Alessio Burrello and Luca Benini and Enrico Macii and Massimo Poncino and Daniele Jahier Pagliari},
url = {https://doi.org/10.48550/arXiv.2206.00302},
doi = {10.48550/arXiv.2206.00302},
eprint = {2206.00302},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.00302},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Yanyu; Yuan, Geng; Wen, Yang; Hu, Eric; Evangelidis, Georgios; Tulyakov, Sergey; Wang, Yanzhi; Ren, Jian
EfficientFormer: Vision Transformers at MobileNet Speed Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01191,
title = {{EfficientFormer}: Vision Transformers at {MobileNet} Speed},
author = {Yanyu Li and Geng Yuan and Yang Wen and Eric Hu and Georgios Evangelidis and Sergey Tulyakov and Yanzhi Wang and Jian Ren},
url = {https://doi.org/10.48550/arXiv.2206.01191},
doi = {10.48550/arXiv.2206.01191},
eprint = {2206.01191},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.01191},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zheng, Xiawu; Fei, Xiang; Zhang, Lei; Wu, Chenglin; Chao, Fei; Liu, Jianzhuang; Zeng, Wei; Tian, Yonghong; Ji, Rongrong
Neural Architecture Search With Representation Mutual Information Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11912–11921, 2022.
@inproceedings{zheng2022neural,
title = {Neural Architecture Search With Representation Mutual Information},
author = {Xiawu Zheng and Xiang Fei and Lei Zhang and Chenglin Wu and Fei Chao and Jianzhuang Liu and Wei Zeng and Yonghong Tian and Rongrong Ji},
url = {https://openaccess.thecvf.com/content/CVPR2022/papers/Zheng_Neural_Architecture_Search_With_Representation_Mutual_Information_CVPR_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition},
pages = {11912--11921},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pan, Junyi; Sun, Chong; Zhou, Yizhou; Zhang, Ying; Li, Chen
Distribution Consistent Neural Architecture Search Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10884–10893, 2022.
@inproceedings{pan2022distribution,
title = {Distribution Consistent Neural Architecture Search},
author = {Junyi Pan and Chong Sun and Yizhou Zhou and Ying Zhang and Chen Li},
url = {https://openaccess.thecvf.com/content/CVPR2022/papers/Pan_Distribution_Consistent_Neural_Architecture_Search_CVPR_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition},
pages = {10884--10893},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xie, Pengtao; Du, Xuefeng
Performance-Aware Mutual Knowledge Distillation for Improving Neural Architecture Search Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11922–11932, 2022.
@inproceedings{xie2022performance,
title = {Performance-Aware Mutual Knowledge Distillation for Improving Neural Architecture Search},
author = {Pengtao Xie and Xuefeng Du},
url = {https://openaccess.thecvf.com/content/CVPR2022/papers/Xie_Performance-Aware_Mutual_Knowledge_Distillation_for_Improving_Neural_Architecture_Search_CVPR_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition},
pages = {11922--11932},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xu, Kepeng; He, Gang
DNAS: A Decoupled Global Neural Architecture Search Method Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1979–1985, 2022.
@inproceedings{xu2022dnas,
title = {{DNAS}: A Decoupled Global Neural Architecture Search Method},
author = {Kepeng Xu and Gang He},
url = {https://openaccess.thecvf.com/content/CVPR2022W/NAS/papers/Xu_DNASA_Decoupled_Global_Neural_Architecture_Search_Method_CVPRW_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition},
pages = {1979--1985},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shang, Ronghua; Zhu, Songling; Ren, Jinhong; Liu, Hangcheng; Jiao, Licheng
Evolutionary neural architecture search based on evaluation correction and functional units Journal Article
In: Knowledge-Based Systems, vol. 251, pp. 109206, 2022, ISSN: 0950-7051.
@article{SHANG2022109206,
title = {Evolutionary neural architecture search based on evaluation correction and functional units},
author = {Ronghua Shang and Songling Zhu and Jinhong Ren and Hangcheng Liu and Licheng Jiao},
url = {https://www.sciencedirect.com/science/article/pii/S0950705122006001},
doi = {10.1016/j.knosys.2022.109206},
issn = {0950-7051},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Knowledge-Based Systems},
volume = {251},
pages = {109206},
abstract = {Neural architecture search (NAS) has been a great success in the automated design of deep neural networks. However, neural architecture search using evolutionary algorithms is challenging due to the diverse structure of neural networks and the difficulty in performance evaluation. To this end, this paper proposes an evolutionary neural architecture search algorithm (called EF-ENAS) based on evaluation corrections and functional units. First, a mating selection operation based on evaluation correction is developed, which can help EF-ENAS discriminate high-performance network architectures and reduce the harmful effects of low fidelity accuracy evaluation methods. Then, a functional unit-based network architecture crossover operation is designed, which divides the neural network into different functional units for crossover and protects valuable network architectures from destruction. Finally, the idea of species protection is introduced into the traditional environmental selection operation and a species protection-based environmental selection operation is designed, which can improve the diversity of network architectures in a population. The EF-ENAS is tested on ten benchmark datasets with varying complexities. In addition, the proposed algorithm is compared with 44 state-of-the-art algorithms, including DARTS, EvoCNN, CNN-GA, AE-CNN, etc. The experimental results show that the proposed algorithm can automatically design neural networks and perform better. (The code of EF-ENAS is available at https://github.com/codesl173/EF-ENAS.)},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Shiqing; Zhang, Haoyu; Jin, Yaochu
A Survey on Surrogate-assisted Efficient Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01520,
title = {A Survey on Surrogate-assisted Efficient Neural Architecture Search},
author = {Shiqing Liu and Haoyu Zhang and Yaochu Jin},
url = {https://doi.org/10.48550/arXiv.2206.01520},
doi = {10.48550/arXiv.2206.01520},
eprint = {2206.01520},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.01520},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Kim, Youngkee; Jung, Soyi; Choi, Minseok; Kim, Joongheon
Search Space Adaptation for Differentiable Neural Architecture Search in Image Classification Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-02098,
title = {Search Space Adaptation for Differentiable Neural Architecture Search in Image Classification},
author = {Youngkee Kim and Soyi Jung and Minseok Choi and Joongheon Kim},
url = {https://doi.org/10.48550/arXiv.2206.02098},
doi = {10.48550/arXiv.2206.02098},
eprint = {2206.02098},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.02098},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Wenshuo; Chen, Xinghao; Bai, Jinyu; Ning, Xuefei; Wang, Yunhe
Searching for Energy-Efficient Hybrid Adder-Convolution Neural Networks Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1943–1952, 2022.
@inproceedings{li2022searching,
title = {Searching for Energy-Efficient Hybrid Adder-Convolution Neural Networks},
author = {Wenshuo Li and Xinghao Chen and Jinyu Bai and Xuefei Ning and Yunhe Wang},
url = {https://openaccess.thecvf.com/content/CVPR2022W/NAS/papers/Li_Searching_for_Energy-Efficient_Hybrid_Adder-Convolution_Neural_Networks_CVPRW_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition},
pages = {1943--1952},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ferjani, Imen; Hidri, Minyar Sassi; Frihida, Ali
SiNoptiC: swarm intelligence optimisation of convolutional neural network architectures for text classification Journal Article
In: International Journal of Computer Applications in Technology, vol. 68, no. 1, pp. 82-100, 2022.
@article{doi:10.1504/IJCAT.2022.123237,
title = {{SiNoptiC}: swarm intelligence optimisation of convolutional neural network architectures for text classification},
author = {Imen Ferjani and Minyar Sassi Hidri and Ali Frihida},
url = {https://www.inderscienceonline.com/doi/abs/10.1504/IJCAT.2022.123237},
doi = {10.1504/IJCAT.2022.123237},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {International Journal of Computer Applications in Technology},
volume = {68},
number = {1},
pages = {82--100},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Jinyuan; Wu, Yuhui; Wu, Guanyao; Liu, Risheng; Fan, Xin
Learn to Search a Lightweight Architecture for Target-aware Infrared and Visible Image Fusion Journal Article
In: IEEE Signal Processing Letters, pp. 1-5, 2022.
@article{9789723,
title = {Learn to Search a Lightweight Architecture for Target-aware Infrared and Visible Image Fusion},
author = {Jinyuan Liu and Yuhui Wu and Guanyao Wu and Risheng Liu and Xin Fan},
url = {https://ieeexplore.ieee.org/abstract/document/9789723},
doi = {10.1109/LSP.2022.3180672},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {{IEEE} Signal Processing Letters},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Zhuowei; Gao, Yibo; Zha, Zhenzhou; Hu, Zhiqiang; Xia, Qing; Zhang, Shaoting; Metaxas, Dimitris N.
Towards Self-supervised and Weight-preserving Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-04125,
title = {Towards Self-supervised and Weight-preserving Neural Architecture Search},
author = {Zhuowei Li and Yibo Gao and Zhenzhou Zha and Zhiqiang Hu and Qing Xia and Shaoting Zhang and Dimitris N. Metaxas},
url = {https://doi.org/10.48550/arXiv.2206.04125},
doi = {10.48550/arXiv.2206.04125},
eprint = {2206.04125},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.04125},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Du, Yipeng; Liu, Jian; Wang, Xiang; Wang, Peng
SSVEP based Emotion Recognition for IoT via Multiobjective Neural Architecture Search Journal Article
In: IEEE Internet of Things Journal, pp. 1-1, 2022.
@article{9793561,
title = {{SSVEP} based Emotion Recognition for {IoT} via Multiobjective Neural Architecture Search},
author = {Yipeng Du and Jian Liu and Xiang Wang and Peng Wang},
url = {https://ieeexplore.ieee.org/abstract/document/9793561},
doi = {10.1109/JIOT.2022.3180215},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {{IEEE} Internet of Things Journal},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}