Publications
2022
Felix Grün; Ioannis Iossifidis
Exploring Distribution Parameterizations for Distributional Continuous Control Inproceedings
In: BC22 : Computational Neuroscience & Neurotechnology Bernstein Conference 2022, BCCN Bernstein Network Computational Network, 2022.
Links | BibTeX | Tags: Machine Learning, Reinforcement learning
@inproceedings{grunExploringDistributionParameterizations2022,
  title     = {Exploring Distribution Parameterizations for Distributional Continuous Control},
  author    = {Grün, Felix and Iossifidis, Ioannis},
  doi       = {10.12751/nncn.bc2022.112},
  year      = {2022},
  date      = {2022-09-15},
  urldate   = {2022-09-15},
  booktitle = {BC22 : Computational Neuroscience \& Neurotechnology Bernstein Conference 2022},
  publisher = {BCCN Bernstein Network Computational Network},
  keywords  = {Machine Learning, Reinforcement learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Aline Xavier Fidencio; Christian Klaes; Ioannis Iossifidis
Error-Related Potentials in Reinforcement Learning-Based Brain-Machine Interfaces Journal Article
In: Frontiers in Human Neuroscience, vol. 16, 2022.
Abstract | Links | BibTeX | Tags: BCI, EEG, error-related potentials, Machine Learning, Reinforcement learning
@article{xavierfidencioErrorrelated,
  title    = {Error-Related Potentials in Reinforcement Learning-Based Brain-Machine Interfaces},
  author   = {Xavier Fidencio, Aline and Klaes, Christian and Iossifidis, Ioannis},
  url      = {https://www.frontiersin.org/article/10.3389/fnhum.2022.806517},
  doi      = {10.3389/fnhum.2022.806517},
  year     = {2022},
  date     = {2022-06-24},
  urldate  = {2022-06-24},
  journal  = {Frontiers in Human Neuroscience},
  volume   = {16},
  abstract = {The human brain has been an object of extensive investigation in different fields. While several studies have focused on understanding the neural correlates of error processing, advances in brain-machine interface systems using non-invasive techniques further enabled the use of the measured signals in different applications. The possibility of detecting these error-related potentials (ErrPs) under different experimental setups on a single-trial basis has further increased interest in their integration in closed-loop settings to improve system performance, for example, by performing error correction. Fewer works have, however, aimed at reducing future mistakes or learning. We present a review focused on the current literature using non-invasive systems that have combined the ErrPs information specifically in a reinforcement learning framework to go beyond error correction and have used these signals for learning.},
  keywords = {BCI, EEG, error-related potentials, Machine Learning, Reinforcement learning},
  pubstate = {published},
  tppubtype = {article}
}
2021
Felix Grün; Tobias Glasmachers; Ioannis Iossifidis
Off-Policy Continuous Control Using Distributional Reinforcement Learning Inproceedings
In: Bernstein Conference, 2021.
Links | BibTeX | Tags: Machine Learning, Reinforcement learning
@inproceedings{grunOffPolicyContinuousControl2021b,
  title     = {Off-Policy Continuous Control Using Distributional Reinforcement Learning},
  author    = {Grün, Felix and Glasmachers, Tobias and Iossifidis, Ioannis},
  doi       = {10.12751/nncn.bc2021.p001},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2021-10-01},
  booktitle = {Bernstein Conference},
  keywords  = {Machine Learning, Reinforcement learning},
  pubstate  = {published},
  tppubtype = {inproceedings},
  internal-note = {NOTE(review): same DOI as grunOffPolicyContinuousControl2021 -- likely duplicate entry; consider merging}
}
Aline Xavier Fidencio; Tobias Glasmachers; Christian Klaes; Ioannis Iossifidis
Beyond Error Correction: Integration of Error-Related Potentials into Brain-Computer Interfaces for Improved Performance Inproceedings
In: Bernstein Conference, 2021.
Links | BibTeX | Tags: BCI, error-related potentials, Machine Learning, Reinforcement learning
@inproceedings{xavierfidencioErrorCorrectionIntegration2021b,
  title     = {Beyond Error Correction: Integration of Error-Related Potentials into Brain-Computer Interfaces for Improved Performance},
  author    = {Xavier Fidencio, Aline and Glasmachers, Tobias and Klaes, Christian and Iossifidis, Ioannis},
  doi       = {10.12751/nncn.bc2021.p163},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2021-10-01},
  booktitle = {Bernstein Conference},
  keywords  = {BCI, error-related potentials, Machine Learning, Reinforcement learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Felix Grün; Tobias Glasmachers; Ioannis Iossifidis
Off-Policy Continuous Control Using Distributional Reinforcement Learning Inproceedings
In: BC21 : Computational Neuroscience & Neurotechnology Bernstein Conference 2021, BCCN Bernstein Network Computational Network, 2021.
Links | BibTeX | Tags: Machine Learning, Reinforcement learning
@inproceedings{grunOffPolicyContinuousControl2021,
  title     = {Off-Policy Continuous Control Using Distributional Reinforcement Learning},
  author    = {Grün, Felix and Glasmachers, Tobias and Iossifidis, Ioannis},
  doi       = {10.12751/nncn.bc2021.p001},
  year      = {2021},
  date      = {2021-09-15},
  urldate   = {2021-09-15},
  booktitle = {BC21 : Computational Neuroscience \& Neurotechnology Bernstein Conference 2021},
  publisher = {BCCN Bernstein Network Computational Network},
  keywords  = {Machine Learning, Reinforcement learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
A X Fidêncio; T Glasmachers; D Naro
Application of Reinforcement Learning to a Mining System Inproceedings
In: 2021 IEEE 19th World Symposium on Applied Machine Intelligence and Informatics (SAMI), pp. 000111–000118, 2021.
Abstract | Links | BibTeX | Tags: Control Applications, Industrial Application, Machine Learning, Machine learning algorithms, Mining Industry, Reinforcement learning
@inproceedings{fidencioApplicationReinforcementLearning2021,
  title     = {Application of Reinforcement Learning to a Mining System},
  author    = {Fidêncio, A. X. and Glasmachers, T. and Naro, D.},
  doi       = {10.1109/SAMI50585.2021.9378663},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {2021 {IEEE} 19th World Symposium on Applied Machine Intelligence and Informatics ({SAMI})},
  pages     = {000111--000118},
  abstract  = {Automation techniques have been widely applied in different industry segments, among others, to increase both productivity and safety. In the mining industry, with the usage of such systems, the operator can be removed from hazardous environments without compromising task execution and it is possible to achieve more efficient and standardized operation. In this work a study case on the application of machine learning algorithms to a mining system example is presented, in which reinforcement learning algorithms were used to solve a control problem. As an example, a machine chain consisting of a Bucket Wheel Excavator, a Belt Wagon and a Hopper Car was used. This system has two material transfer points that need to remain aligned during operation in order to allow continuous material flow. To keep the alignment, the controller makes use of seven degrees of freedom given by slewing, luffing and crawler drives. Experimental tests were done in a simulated environment with two state-of-the-art algorithms, namely Proximal Policy Optimization (PPO) and Soft Actor-Critic (SAC). The trained agents were evaluated in terms of episode return and length, as well as alignment quality and action values used. Results show that, for the given task, the PPO agent performs quantitatively and qualitatively better than the SAC agent. However, none of the agents were able to completely solve the proposed testing task.},
  keywords  = {Control Applications, Industrial Application, Machine Learning, Machine learning algorithms, Mining Industry, Reinforcement learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}