Publications
2022
Omair Ali; Muhammad Saif-ur-Rehman; Susanne Dyck; Tobias Glasmachers; Ioannis Iossifidis; Christian Klaes
Enhancing the decoding accuracy of EEG signals by the introduction of anchored-STFT and adversarial data augmentation method — Journal Article
In: Scientific Reports, vol. 12, iss. 1, pp. 4245, 2022, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: Adversarial NN, BCI, computer science, EEG, Machine Learning, Quantitative Biology, Quantitative Methods
@article{aliAnchoredSTFTGNAAExtension2021a,
  title     = {Enhancing the Decoding Accuracy of {EEG} Signals by the Introduction of Anchored-{STFT} and Adversarial Data Augmentation Method},
  author    = {Ali, Omair and Saif-ur-Rehman, Muhammad and Dyck, Susanne and Glasmachers, Tobias and Iossifidis, Ioannis and Klaes, Christian},
  url       = {https://www.nature.com/articles/s41598-022-07992-w},
  doi       = {10.1038/s41598-022-07992-w},
  issn      = {2045-2322},
  year      = {2022},
  date      = {2022-03-10},
  urldate   = {2022-03-10},
  journal   = {Scientific Reports},
  volume    = {12},
  number    = {1},
  pages     = {4245},
  abstract  = {Brain-computer interfaces (BCIs) enable communication between humans and machines by translating brain activity into control commands. Electroencephalography (EEG) signals are one of the most used brain signals in non-invasive BCI applications but are often contaminated with noise. Therefore, it is possible that meaningful patterns for classifying EEG signals are deeply hidden. State-of-the-art deep-learning algorithms are successful in learning hidden, meaningful patterns. However, the quality and the quantity of the presented inputs is pivotal. Here, we propose a novel feature extraction method called anchored Short Time Fourier Transform (anchored-STFT), which is an advanced version of STFT, as it minimizes the trade-off between temporal and spectral resolution presented by STFT. In addition, we propose a novel augmentation method, called gradient norm adversarial augmentation (GNAA). GNAA is not only an augmentation method but is also used to harness adversarial inputs in EEG data, which not only improves the classification accuracy but also enhances the robustness of the classifier. In addition, we also propose a new CNN architecture, namely Skip-Net, for the classification of EEG signals. The proposed pipeline outperforms all state-of-the-art methods and yields an average classification accuracy of 90.7 % and 89.54 % on BCI competition II dataset III and BCI competition IV dataset 2b, respectively.},
  keywords  = {Adversarial NN, BCI, computer science, EEG, Machine Learning, Quantitative Biology, Quantitative Methods},
  pubstate  = {published},
  tppubtype = {article},
  internal-note = {NOTE(review): citation key says 2021a but year is 2022 -- key kept unchanged since other documents may cite it; journal renamed from "Nature Scientific Reports" to its official title "Scientific Reports" (ISSN 2045-2322) -- confirm against publisher page},
}
Brain-computer interfaces (BCIs) enable communication between humans and machines by translating brain activity into control commands. Electroencephalography (EEG) signals are one of the most used brain signals in non-invasive BCI applications but are often contaminated with noise. Therefore, it is possible that meaningful patterns for classifying EEG signals are deeply hidden. State-of-the-art deep-learning algorithms are successful in learning hidden, meaningful patterns. However, the quality and the quantity of the presented inputs is pivotal. Here, we propose a novel feature extraction method called anchored Short Time Fourier Transform (anchored-STFT), which is an advanced version of STFT, as it minimizes the trade-off between temporal and spectral resolution presented by STFT. In addition, we propose a novel augmentation method, called gradient norm adversarial augmentation (GNAA). GNAA is not only an augmentation method but is also used to harness adversarial inputs in EEG data, which not only improves the classification accuracy but also enhances the robustness of the classifier. In addition, we also propose a new CNN architecture, namely Skip-Net, for the classification of EEG signals. The proposed pipeline outperforms all state-of-the-art methods and yields an average classification accuracy of 90.7 % and 89.54 % on BCI competition II dataset III and BCI competition IV dataset 2b, respectively.