Publications
2010
Yulia Sandamirskaya; John Lipinski; Ioannis Iossifidis; G Schöner
Natural human-robot interaction through spatial language: a dynamic neural fields approach Inproceedings
In: Proc. 19th IEEE International Workshop on Robot and Human Interactive Communication (ROMAN 2010), pp. 600–607, IEEE, 2010, ISSN: 1944-9445.
Links | BibTeX | Tags: Autonomous robotics, behavior generation, dynamical systems, man machine interaction, movement model, speech recognition
@inproceedings{Sandamirskayasubmitted,
  title     = {Natural human-robot interaction through spatial language: a dynamic neural fields approach},
  author    = {Sandamirskaya, Yulia and Lipinski, John and Iossifidis, Ioannis and Schöner, G},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5598671},
  issn      = {1944-9445},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {Proc. 19th IEEE International Workshop on Robot and Human Interactive Communication (ROMAN 2010)},
  pages     = {600--607},
  publisher = {IEEE},
  keywords  = {Autonomous robotics, behavior generation, dynamical systems, man machine interaction, movement model, speech recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stephan K U Zibner; Christian Faubel; Ioannis Iossifidis; Gregor Schöner
Scene Representation Based on Dynamic Field Theory: From Human to Machine Journal Article
In: Front. Comput. Neurosci. Conference Abstract: Bernstein Conference on Computational Neuroscience, 2010.
Links | BibTeX | Tags: dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition
@article{Zibner2010a,
  title     = {Scene Representation Based on Dynamic Field Theory: From Human to Machine},
  author    = {Zibner, Stephan K U and Faubel, Christian and Iossifidis, Ioannis and Schöner, Gregor},
  doi       = {10.3389/conf.fncom.2010.51.00019},
  year      = {2010},
  date      = {2010-01-01},
  journal   = {Front. Comput. Neurosci. Conference Abstract: Bernstein Conference on Computational Neuroscience},
  keywords  = {dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition},
  pubstate  = {published},
  tppubtype = {article}
}
Stephan K U Zibner; Christian Faubel; Ioannis Iossifidis; Gregor Schöner
Scene Representation for Anthropomorphic Robots: A Dynamic Neural Field Approach Inproceedings
In: ISR / ROBOTIK 2010, VDE VERLAG GmbH, Munich, Germany, 2010.
Abstract | Links | BibTeX | Tags: Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition
@inproceedings{Zibner2010ab,
  title     = {Scene Representation for Anthropomorphic Robots: A Dynamic Neural Field Approach},
  author    = {Zibner, Stephan K U and Faubel, Christian and Iossifidis, Ioannis and Schöner, Gregor},
  url       = {http://www.vde-verlag.de/proceedings-en/453273138.html},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {ISR / ROBOTIK 2010},
  publisher = {VDE VERLAG GmbH},
  address   = {Munich, Germany},
  abstract  = {An internal representation of a scene is essential to generate actions on scene objects. A stabilized storage of object location and features offers the flexibility to process queries phrased in human-based terms relating to objects, which may not be in the current camera view. Scene representation is therefore an internal representation of the surrounding world that is stabilized against head and body movement. It contains associated information about location and features of objects. Because objects and bodies move, scene representation is not a one-time process, but a constantly scene-adapting mechanism of scanning for, storing, updating, and deleting information.
Our novel architecture incorporates the generation of autonomous scanning sequences on real-time camera images. The head can then be oriented towards a selected object and the color feature can be extracted. Object location and feature information are associatively stored in a three-dimensional Dynamic Neural Field. Changes in the scene, even for multiple objects, can be tracked simultaneously. The stored information is used to generate behavior for cued recall. Cues can be table regions, features, or object labels. The robot demonstrates a successful recall by centering its gaze on the stated object.},
  keywords  = {Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Our novel architecture incorporates the generation of autonomous scanning sequences on real-time camera images. The head can then be oriented towards a selected object and the color feature can be extracted. Object location and feature information are associatively stored in a three-dimensional Dynamic Neural Field. Changes in the scene, even for multiple objects, can be tracked simultaneously. The stored information is used to generate behavior for cued recall. Cues can be table regions, features, or object labels. The robot demonstrates a successful recall by centering its gaze on the stated object.
Stephan K U Zibner; Christian Faubel; John P Spencer; Ioannis Iossifidis; Gregor Schöner
Scenes and Tracking with Dynamic Neural Fields: How to Update a Robotic Scene Representation Inproceedings
In: Proc. Int. Conf. on Development and Learning (ICDL10), 2010.
BibTeX | Tags: Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition
@inproceedings{Zibner2010c,
  title     = {Scenes and Tracking with Dynamic Neural Fields: How to Update a Robotic Scene Representation},
  author    = {Zibner, Stephan K U and Faubel, Christian and Spencer, John P and Iossifidis, Ioannis and Schöner, Gregor},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {Proc. Int. Conf. on Development and Learning (ICDL10)},
  keywords  = {Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stephan K U Zibner; Christian Faubel; Ioannis Iossifidis; Gregor Schöner
Scene Representation with Dynamic Neural Fields: An Example of Complex Cognitive Architectures Based on Dynamic Neural Field Theory Inproceedings
In: Proc. Int. Conf. on Development and Learning (ICDL10), 2010.
BibTeX | Tags: Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition
@inproceedings{Zibnersubmittedb,
  title     = {Scene Representation with Dynamic Neural Fields: An Example of Complex Cognitive Architectures Based on Dynamic Neural Field Theory},
  author    = {Zibner, Stephan K U and Faubel, Christian and Iossifidis, Ioannis and Schöner, Gregor},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {Proc. Int. Conf. on Development and Learning (ICDL10)},
  keywords  = {Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stephan Zibner; Christian Faubel; Ioannis Iossifidis; Gregor Schöner; John P Spencer
Scene and Tracking with Dynamic Neural Field Approach Inproceedings
In: ISR / ROBOTIK 2010, Munich, Germany, 2010.
Abstract | BibTeX | Tags: Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition
@inproceedings{Zibneri,
  title     = {Scene and Tracking with Dynamic Neural Field Approach},
  author    = {Zibner, Stephan and Faubel, Christian and Iossifidis, Ioannis and Schöner, Gregor and Spencer, John P},
  year      = {2010},
  date      = {2010-01-01},
  booktitle = {ISR / ROBOTIK 2010},
  address   = {Munich, Germany},
  abstract  = {An internal representation of a scene is essential to generate actions on scene objects. A stabilized storage of object location and features offers the flexibility to process queries phrased in human-based terms relating to objects, which may not be in the current camera view. Scene representation is therefore an internal representation of the surrounding world that is stabilized against head and body movement. It contains associated information about location and features of objects. Because objects and bodies move, scene representation is not a one-time process, but a constantly scene-adapting mechanism of scanning for, storing, updating, and deleting information.
Our novel architecture incorporates the generation of autonomous scanning sequences on real-time camera images. The head can then be oriented towards a selected object and the color feature can be extracted. Object location and feature information are associatively stored in a three-dimensional Dynamic Neural Field. Changes in the scene, even for multiple objects, can be tracked simultaneously. The stored information is used to generate behavior for cued recall. Cues can be table regions, features, or object labels. The robot demonstrates a successful recall by centering its gaze on the stated object.},
  keywords  = {Autonomous robotics, dynamic neural field, dynamical systems, man machine interaction, scene representation, speech recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Our novel architecture incorporates the generation of autonomous scanning sequences on real-time camera images. The head can then be oriented towards a selected object and the color feature can be extracted. Object location and feature information are associatively stored in a three-dimensional Dynamic Neural Field. Changes in the scene, even for multiple objects, can be tracked simultaneously. The stored information is used to generate behavior for cued recall. Cues can be table regions, features, or object labels. The robot demonstrates a successful recall by centering its gaze on the stated object.
2002
Ioannis Iossifidis; Carsten Bruckhoff; Christoph Theis; Claudia Grote; Christian Faubel; Gregor Schöner
CORA: An anthropomorphic robot assistant for human environment Inproceedings
In: Proc. 11th IEEE International Workshop on Robot and Human Interactive Communication, pp. 392–398, 2002.
Abstract | Links | BibTeX | Tags: assembling, Cooperative Robot Assistant, CORA, gesture recognition, haptic interfaces, household, industrial assembly, manipulators, object recognition, robot assistant, robots, speech recognition
@inproceedings{Iossifidis2002a,
  title     = {{CORA}: An anthropomorphic robot assistant for human environment},
  author    = {Iossifidis, Ioannis and Bruckhoff, Carsten and Theis, Christoph and Grote, Claudia and Faubel, Christian and Schöner, Gregor},
  doi       = {10.1109/ROMAN.2002.1045654},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Proc. 11th IEEE International Workshop on Robot and Human Interactive Communication},
  pages     = {392--398},
  abstract  = {We describe the general concept, system architecture, hardware, and the behavioral abilities of CORA (Cooperative Robot Assistant), an autonomous nonmobile robot assistant. Outgoing from our basic assumption that the behavior to perform determines the internal and external structure of the behaving system, we have designed CORA anthropomorphic to allow for humanlike behavioral strategies in solving complex tasks. Although CORA was built as a prototype of a service robot system to assist a human partner in industrial assembly tasks, we will show that CORA's behavioral abilities are also conferrable in a household environment. After the description of the hardware platform and the basic concepts of our approach, we present some experimental results by means of an assembly task.},
  keywords  = {assembling, Cooperative Robot Assistant, CORA, gesture recognition, haptic interfaces, household, industrial assembly, manipulators, object recognition, robot assistant, robots, speech recognition},
  pubstate  = {published},
  tppubtype = {inproceedings}
}