Publications

Journal articles

  1. Fusion of facial expressions and EEG for implicit affective tagging. pdf bibtex S. Koelstra and I. Patras. In Image and Vision Computing, 2012.
    @article{koelstra2012fusion,
      author    = {Koelstra, S. and Patras, I.},
      title     = {Fusion of facial expressions and {EEG} for implicit affective tagging},
      journal   = {Image and Vision Computing},
      year      = {2012},
      publisher = {Elsevier},
    }
  2. DEAP: A database for emotion analysis; using physiological signals. pdf bibtex S. Koelstra, C. Muehl, M. Soleymani, J.-S. Lee, A. Yazdani, T. Ebrahimi, T. Pun, A. Nijholt and I. Patras. In Affective Computing, IEEE Transactions on, vol. 3, number 1, 2012.
    @article{koelstra2012deap,
      author    = {Koelstra, S. and Muehl, C. and Soleymani, M. and Lee, J.-S. and Yazdani, A. and Ebrahimi, T. and Pun, T. and Nijholt, A. and Patras, I.},
      title     = {{DEAP}: A database for emotion analysis; using physiological signals},
      journal   = {IEEE Transactions on Affective Computing},
      volume    = {3},
      number    = {1},
      pages     = {18--31},
      year      = {2012},
      publisher = {IEEE},
    }
  3. A Dynamic Texture based Approach to Recognition of Facial Actions and their Temporal Models. pdf bibtex S. Koelstra, M. Pantic and I. Patras. In IEEE Trans. Pattern Analysis and Machine Intelligence, vol. 32, number 11, 2010.
    @article{Koelstra10,
      author   = {Koelstra, S. and Pantic, M. and Patras, I.},
      title    = {A Dynamic Texture based Approach to Recognition of Facial Actions and their Temporal Models},
      journal  = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
      volume   = {32},
      number   = {11},
      pages    = {1940--1954},
      year     = {2010},
      abstract = {In this work we propose a dynamic-texture-based approach to the recognition of facial Action Units (AUs, atomic facial gestures) and their temporal models (i.e., sequences of temporal segments: neutral, onset, apex, and offset) in near-frontal-view face videos. Two approaches to modelling the dynamics and the appearance in the face region of an input video are compared: an extended version of Motion History Images and a novel method based on Non-rigid Registration using Free-Form Deformations (FFDs). The extracted motion representation is used to derive motion orientation histogram descriptors in both the spatial and temporal domain. Per AU, a combination of discriminative, frame-based GentleBoost ensemble learners and dynamic, generative Hidden Markov Models detects the presence of the AU in question and its temporal segments in an input image sequence. When tested for recognition of all 27 lower and upper face AUs, occurring alone or in combination in 264 sequences from the MMI facial expression database, the proposed method achieved an average event recognition accuracy of 89.2\% for the MHI method and of 94.3\% for the FFD method. The generalization performance of the FFD method has been tested using the Cohn-Kanade database. Finally, we also explored the performance on spontaneous expressions in the Sensitive Artificial Listener dataset.},
    }

Conference articles

  1. Continuous Emotion Detection in Response to Music Videos. pdf bibtex M. Soleymani, S. Koelstra, I. Patras and T. Pun. In International Workshop on Emotion Synthesis, rePresentation, and Analysis in Continuous spacE (EmoSPACE) In conjunction with the IEEE FG 2011, 2011.
    @inproceedings{Soleymani,
      author        = {Soleymani, M. and Koelstra, S. and Patras, I. and Pun, T.},
      title         = {Continuous Emotion Detection in Response to Music Videos},
      booktitle     = {International Workshop on Emotion Synthesis, rePresentation, and Analysis in Continuous spacE (EmoSPACE) In conjunction with the IEEE FG 2011},
      pages         = {803--808},
      year          = {2011},
      abstract      = {We present a multimodal dataset for the analysis of human affective states. The  electroencephalogram (EEG) and peripheral physiological signals of 32 participants were recorded as each watched 40 one-minute long excerpts of music videos. Participants rated each video in terms of the levels of arousal, valence, like/dislike, dominance and familiarity. For 22 of the 32 participants, frontal face video was also recorded. A novel method for stimuli selection is proposed using retrieval by affective tags from the last.fm website, video highlight detection and an online assessment tool. An extensive analysis of the participants' ratings during the experiment is presented. Correlates between the EEG signal frequencies and the participants' ratings are investigated. Methods and results are presented for single-trial classification of arousal, valence and like/dislike ratings using the modalities of EEG, peripheral physiological signals and multimedia content analysis. Finally, decision fusion of the classification results from the different modalities is performed. The dataset is made publicly available and we encourage other researchers to use it for testing their own affective state estimation methods.},
      internal-note = {NOTE(review): this abstract describes the DEAP dataset journal paper (koelstra2012deap), not the EmoSPACE workshop paper -- verify against the published version},
    }
  2. Single trial classification of EEG and peripheral physiological signals for recognition of emotions induced by music videos. pdf bibtex S. Koelstra, A. Yazdani, M. Soleymani, C. Muehl, J.-S. Lee, A. Nijholt, T. Pun, T. Ebrahimi and I. Patras. In Brain Informatics, 2010.
    @inproceedings{koelstra2010single,
      author    = {Koelstra, S. and Yazdani, A. and Soleymani, M. and Muehl, C. and Lee, J.-S. and Nijholt, A. and Pun, T. and Ebrahimi, T. and Patras, I.},
      title     = {Single trial classification of {EEG} and peripheral physiological signals for recognition of emotions induced by music videos},
      booktitle = {Brain Informatics},
      pages     = {89--100},
      year      = {2010},
      publisher = {Springer},
    }
  3. EEG analysis for implicit tagging of video data. pdf bibtex S. Koelstra, C. Muehl, and I. Patras. In Affective Computing and Intelligent Interaction and Workshops, 2009. ACII 2009. 3rd International Conference on, 2009.
    @inproceedings{koelstra2009eeg,
      author       = {Koelstra, S. and Muehl, C. and Patras, I.},
      title        = {{EEG} analysis for implicit tagging of video data},
      booktitle    = {Affective Computing and Intelligent Interaction and Workshops, 2009. ACII 2009. 3rd International Conference on},
      pages        = {1--6},
      year         = {2009},
      organization = {IEEE},
    }
  4. The FAST-3D spatio-temporal interest region detector. pdf bibtex S. Koelstra, and I. Patras. In Image Analysis for Multimedia Interactive Services, 2009. WIAMIS'09. 10th Workshop on, 2009.
    @inproceedings{koelstra2009fast,
      author       = {Koelstra, S. and Patras, I.},
      title        = {The {FAST-3D} spatio-temporal interest region detector},
      booktitle    = {Image Analysis for Multimedia Interactive Services, 2009. WIAMIS'09. 10th Workshop on},
      pages        = {242--245},
      year         = {2009},
      organization = {IEEE},
    }
    
  5. Non-rigid registration using free-form deformations for recognition of facial actions and their temporal dynamics. pdf bibtex S. Koelstra and M. Pantic. In Automatic Face & Gesture Recognition, 2008. FG'08. 8th IEEE International Conference on, 2008.
    @inproceedings{koelstra2008non,
      author       = {Koelstra, S. and Pantic, M.},
      title        = {Non-rigid registration using free-form deformations for recognition of facial actions and their temporal dynamics},
      booktitle    = {Automatic Face \& Gesture Recognition, 2008. FG'08. 8th IEEE International Conference on},
      pages        = {1--8},
      year         = {2008},
      organization = {IEEE},
    }

Theses

  1. Affective and Implicit Tagging using Facial Expressions and Electroencephalography. pdf bibtex S. Koelstra. Queen Mary University of London, 2012. PhD Thesis
    @phdthesis{Koelstra2012affective,
      author = {Koelstra, S.},
      title  = {Affective and Implicit Tagging using Facial Expressions and Electroencephalography},
      school = {Queen Mary University of London},
      month  = mar,
      year   = {2012},
    }
  2. Using appearance-based features in the recognition of facial actions and their temporal dynamics. bibtex S. Koelstra. Delft University of Technology, 2007. MSc. Thesis
    @mastersthesis{Koelstra2007msc,
      author = {Koelstra, S.},
      title  = {Using appearance-based features in the recognition of facial actions and their temporal dynamics},
      school = {Delft University of Technology},
      type   = {{MSc} Thesis},
      year   = {2007},
    }
  3. Aibo Messenger. bibtex S. Koelstra and E. Jacobs. Delft University of Technology, 2005. BSc. Thesis
    @mastersthesis{Koelstra2005bsc,
      author = {Koelstra, S. and Jacobs, E.},
      title  = {{Aibo} Messenger},
      school = {Delft University of Technology},
      type   = {{BSc} Thesis},
      year   = {2005},
    }