<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR AI</journal-id>
      <journal-title>JMIR AI</journal-title>
      <issn pub-type="epub">2817-1705</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v3i1e52171</article-id>
      <article-id pub-id-type="pmid">38875573</article-id>
      <article-id pub-id-type="doi">10.2196/52171</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>A Comparison of Personalized and Generalized Approaches to Emotion Recognition Using Consumer Wearable Devices: Machine Learning Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>El Emam</surname>
            <given-names>Khaled</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Malin</surname>
            <given-names>Bradley</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Pandey</surname>
            <given-names>Sachin</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhou</surname>
            <given-names>Mo</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Vos</surname>
            <given-names>Gideon</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Joe</given-names>
          </name>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0009-6834-6810</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Washington</surname>
            <given-names>Peter</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Information and Computer Sciences</institution>
            <institution>University of Hawaiʻi at Mānoa</institution>
            <addr-line>1680 East-West Road, Room 312</addr-line>
            <addr-line>Honolulu, HI, 96822</addr-line>
            <country>United States</country>
            <phone>1 000000000</phone>
            <email>pyw@hawaii.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3276-4411</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Information and Computer Sciences</institution>
        <institution>University of Hawaiʻi at Mānoa</institution>
        <addr-line>Honolulu, HI</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Peter Washington <email>pyw@hawaii.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>10</day>
        <month>5</month>
        <year>2024</year>
      </pub-date>
      <volume>3</volume>
      <elocation-id>e52171</elocation-id>
      <history>
        <date date-type="received">
          <day>25</day>
          <month>8</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>19</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>19</day>
          <month>2</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>23</day>
          <month>3</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Joe Li, Peter Washington. Originally published in JMIR AI (https://ai.jmir.org), 10.05.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on https://www.ai.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://ai.jmir.org/2024/1/e52171" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>There are a wide range of potential adverse health effects, ranging from headaches to cardiovascular disease, associated with long-term negative emotions and chronic stress. Because many indicators of stress are imperceptible to observers, the early detection of stress remains a pressing medical need, as it can enable early intervention. Physiological signals offer a noninvasive method for monitoring affective states and are recorded by a growing number of commercially available wearables.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We aim to study the differences between personalized and generalized machine learning models for 3-class emotion classification (neutral, stress, and amusement) using wearable biosignal data.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We developed a neural network for the 3-class emotion classification problem using data from the Wearable Stress and Affect Detection (WESAD) data set, a multimodal data set with physiological signals from 15 participants. We compared the results between a participant-exclusive generalized, a participant-inclusive generalized, and a personalized deep learning model.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>For the 3-class classification problem, our personalized model achieved an average accuracy of 95.06% and an <italic>F</italic><sub>1</sub>-score of 91.72%; our participant-inclusive generalized model achieved an average accuracy of 66.95% and an <italic>F</italic><sub>1</sub>-score of 42.50%; and our participant-exclusive generalized model achieved an average accuracy of 67.65% and an <italic>F</italic><sub>1</sub>-score of 43.05%.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Our results emphasize the need for increased research in personalized emotion recognition models given that they outperform generalized models in certain contexts. We also demonstrate that personalized machine learning models for emotion classification are viable and can achieve high performance.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>affect detection</kwd>
        <kwd>affective computing</kwd>
        <kwd>deep learning</kwd>
        <kwd>digital health</kwd>
        <kwd>emotion recognition</kwd>
        <kwd>machine learning</kwd>
        <kwd>mental health</kwd>
        <kwd>personalization</kwd>
        <kwd>stress detection</kwd>
        <kwd>wearable technology</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Stress and negative affect can have long-term consequences for physical and mental health, such as chronic illness, higher mortality rates, and major depression [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. Therefore, the early detection and corresponding intervention of stress and negative emotions greatly reduces the risk of detrimental health conditions appearing later in life [<xref ref-type="bibr" rid="ref4">4</xref>]. Since negative stress and affect can be difficult for humans to observe [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>], automated emotion recognition models can play an important role in health care. Affective computing can also facilitate digital therapy and advance the development of assistive technologies for autism [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref13">13</xref>].</p>
      <p>Physiological signals, including electrocardiography (ECG), electrodermal activity (EDA), and photoplethysmography (PPG), have been shown to be robust indicators of emotions [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. The noninvasive nature of physiological signal measurement makes it a practical and convenient method for emotion recognition. Wearable devices such as smartwatches have become increasingly popular, and products such as Fitbit have already integrated the sensing of heart rate, ECG, and EDA data into their smartwatches. The accessibility of wearable devices indicates that an emotion recognition model using biosignals can have practical applications in health care.</p>
      <p>The vast majority of research in recognizing emotions from biosignals involves machine learning models that are generalizable, which means that the models were trained on one group of subjects and tested on a separate group of subjects [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref28">28</xref>]. Prior studies emphasize the need for personalized or subject-dependent models [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>], and some investigations, albeit few, analyze personalized models [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Both generalized and personalized models have potential benefits; for example, generalized models can train on more data than personalized models, and personalized models do not need to address the problem of inter-subject data variance [<xref ref-type="bibr" rid="ref33">33</xref>]. However, it is still unclear how personalized models compare against generalized models in many contexts.</p>
      <p>We present 1 personalized and 2 generalized machine learning approaches for the 3-class emotion classification problem (neutral, stress, and amusement) on the Wearable Stress and Affect Detection (WESAD) data set, a publicly available data set that includes both stress and emotion data [<xref ref-type="bibr" rid="ref18">18</xref>]. The two generalized models are trained using participant-inclusive and participant-exclusive procedures. We compare the performance of these 3 models, finding that the personalized machine learning approach consistently outperforms the generalized approach on the WESAD data set.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>To classify physiological data into the neutral, stress, and amusement classes, we developed a machine learning framework and evaluated the framework using data from the WESAD data set. Our machine learning framework consists of data preprocessing, a convolutional encoder for feature extraction, and a feedforward neural network for supervised prediction (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Using this model architecture, we compared generalized and personalized approaches to the 3-class emotion classification task (neutral, stress, and amusement).</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Overview of our model architecture for the 3-class emotion classification task. FNN: feedforward neural network; SiLU: sigmoid linear unit.</p>
          </caption>
          <graphic xlink:href="ai_v3i1e52171_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Data Set</title>
        <p>We selected WESAD, a publicly available data set that combines both stress and emotion annotations. WESAD consists of multimodal physiological data in the form of continuous time-series data for 15 participants and corresponding annotations of 4 affective states: neutral, stress, amusement, and meditation. However, we only considered the neutral, stress, and amusement classes since the objective of WESAD is to provide data for the 3-class classification problem, and the benchmark model in WESAD ignores the meditation state as well. Our model incorporated data from 8 modalities recorded in WESAD: ECG, EDA, electromyogram (EMG), respiration, temperature, and acceleration (x, y, and z axes). In the data set, measurements for each of the 8 modalities were sampled by a RespiBAN sensor at 700 Hz to enforce uniformity, and data were collected for approximately 36 minutes per participant.</p>
      </sec>
      <sec>
        <title>Preprocessing and Partitioning</title>
        <p>Each data modality was normalized with a mean of 0 and an SD of 1. We used a sliding window algorithm to partition each modality into intervals consisting of 64 data points, with a 50% overlap between consecutive intervals. We ensured that all 64 data points within an interval shared a common annotation, which allowed us to assign a single affective state to each interval. The process of normalization, followed by a sliding window partition, is illustrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>. These intervals were partitioned into training, validation, and testing sets.</p>
        <p>For the personalized model, we partitioned the training, validation, and testing sets as follows: each participant in the data set had their own model that was trained, validated, and tested independently of other participants. For each affective state (neutral, stress, and amusement), we allocated the initial 70% of intervals with that affective state for training, the next 15% for validation, and the final 15% for testing. This guaranteed that the relative frequencies of each affective state were consistent across all 3 sets. Simply using the first 70% of all intervals for the training data would skew the distribution of affective states, given the nature of the WESAD data set. Furthermore, our partitioning of intervals according to sequential time order rather than random selection helped prevent overfitting by guaranteeing that 2 adjacent intervals with similar features would be in the same set. The partitioning of training, validation, and testing sets for the personalized model is shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>A comparison of different generalized and personalized approaches to the 3-class emotion classification task. The participant-exclusive generalized model mimics generalized approaches used in other papers. The participant-exclusive generalized model shown in the figure differs from what we use in this paper.</p>
          </caption>
          <graphic xlink:href="ai_v3i1e52171_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Standard generalized models partition the training, validation, and testing sets by participant [<xref ref-type="bibr" rid="ref18">18</xref>]. We denote these standard models as participant-exclusive generalized models, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>. Through this partitioning method, it is impossible to compare the performances of generalized and personalized models since they are solving two separate tasks. Therefore, we present a modified participant-exclusive generalized model that solves the same task as the personalized model. The testing set for our participant-exclusive generalized model consisted of the last 15% of intervals for each affective state for 1 participant. The training set consisted of the first 70% of intervals for each affective state for all participants except the 1 participant in the testing set, and the validation set consisted of the next 15% of intervals for all participants except the 1 participant in the testing set. The training and testing sets for this approach contained data from mutually exclusive sets of participants; this is where the name of the model, participant-exclusive, is derived from. Since the testing sets for the participant-exclusive generalized and personalized models are equivalent, it is possible to compare generalized and personalized approaches. This participant-exclusive generalized model served as our first generalized model baseline.</p>
        <p>A second generalized model baseline was created, called the participant-inclusive generalized model. Like the testing sets for the participant-exclusive generalized and personalized models, the testing set for this model contained the last 15% of intervals for each affective state for a single participant. The training set consisted of the first 70% of intervals for each affective state for all participants, and the validation set consisted of the next 15%. The set of participants in the training and testing sets overlapped by 1 participant—the subject in the testing set—which is why this model is called the participant-inclusive generalized model. This is illustrated in <xref rid="figure2" ref-type="fig">Figure 2</xref>.</p>
      </sec>
      <sec>
        <title>Model Architecture</title>
        <p>The model architecture consisted of an encoder network followed by a feedforward head, which is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>. A total of 8 channels, representing the 8 modalities we used from WESAD, served as input into an encoder network, which was modeled after the encoder section of U-Net [<xref ref-type="bibr" rid="ref34">34</xref>]. The encoder network had 3 blocks, with each block consisting of two 1D convolutional layers (kernel size of 3) followed by 1D max pooling (kernel size of 2). The output of each convolution operation was passed through a sigmoid linear unit (SiLU) activation function. Between each block, we doubled the number of channels and added a dropout layer (15%) to reduce overfitting. The output of the encoder was flattened and passed through 2 fully connected layers with SiLU activation to produce a 3-class probability distribution. <xref ref-type="table" rid="table1">Table 1</xref> shows the hyperparameters that determine the model structure. These were consistent between the participant-exclusive generalized, participant-inclusive generalized, and personalized models.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Hyperparameters relating to model structure.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td>Hyperparameter</td>
                <td>Value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Encoder depth (number of blocks), n</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>Dropout rate, %</td>
                <td>15</td>
              </tr>
              <tr valign="top">
                <td>Number of fully connected layers, n</td>
                <td>2</td>
              </tr>
              <tr valign="top">
                <td>Convolutional kernel size, n</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>Max pooling kernel size, n</td>
                <td>2</td>
              </tr>
              <tr valign="top">
                <td>Activation function</td>
                <td>SiLU<sup>a</sup></td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>SiLU: sigmoid linear unit.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Model Training</title>
        <p>We trained the 2 generalized baseline models and the personalized model under the same hyperparameters to guarantee a fair comparison. Both models were trained with cross-entropy loss using AdamW optimization. All models were written using PyTorch [<xref ref-type="bibr" rid="ref35">35</xref>]. Within 1000 epochs, models with the lowest validation loss were saved for testing. A Nvidia GeForce RTX 4090 GPU was used for training. A separate personalized model was trained for each of the 15 participants. The participant-exclusive generalized model was trained 15 times, and the participant-inclusive generalized model was trained once. For model comparison, all models were tested on each of the 15 participants.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study did not require institutional review board (IRB) review because we exclusively used a commonly analyzed publicly available data set. We did not work with any human subjects.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>For the 3-class emotion classification task (neutral, stress, and amusement), <xref ref-type="table" rid="table2">Tables 2</xref> and <xref ref-type="table" rid="table3">3</xref> illustrate the accuracy and <italic>F</italic><sub>1</sub>-score of the personalized and generalized models when tested on each of the 15 participants. We include <italic>F</italic><sub>1</sub>-score, a balanced evaluation metric consisting of the harmonic mean of precision and recall, to accommodate for the imbalanced class distribution in WESAD [<xref ref-type="bibr" rid="ref18">18</xref>]. In order to guarantee a fair comparison between the models, they had the same random seeds for model initialization, and their architecture and hyperparameters were the same. The accuracy and <italic>F</italic><sub>1</sub>-score for the personalized model exceeded those of the participant-inclusive generalized model for all participants except participant 1, and the personalized model outperformed the participant-exclusive generalized model in terms of accuracy and <italic>F</italic><sub>1</sub>-score for all participants. The personalized models for participants 1 and 2 also indicate subpar performance compared to other participants, which we address in the Discussion section.</p>
      <p><xref ref-type="table" rid="table4">Table 4</xref> shows the average and SD of the accuracies and <italic>F</italic><sub>1</sub>-scores across all participants for the 3 models. We achieved an average accuracy of 95.06%, 66.95%, and 67.65% for the personalized, participant-inclusive generalized, and participant-exclusive generalized models, respectively. We also achieved an average <italic>F</italic><sub>1</sub>-score of 91.72%, 42.50%, and 43.05% for the personalized, participant-inclusive generalized, and participant-exclusive generalized models, respectively. Observing the error margins in <xref ref-type="table" rid="table4">Table 4</xref>, the differences in accuracy and <italic>F</italic><sub>1</sub>-score between the personalized model and both generalized models are statistically significant. As shown in <xref ref-type="table" rid="table5">Table 5</xref>, we evaluated the <italic>P</italic> values between each model type for accuracy and <italic>F</italic><sub>1</sub>-score through pairwise 2-tailed <italic>t</italic> tests to determine statistical significance.</p>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>A comparison of model accuracy between the personalized and generalized models.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="130"/>
          <col width="230"/>
          <col width="330"/>
          <col width="310"/>
          <thead>
            <tr valign="top">
              <td>Participant</td>
              <td colspan="3">Model accuracy, %</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Personalized model</td>
              <td>Participant-inclusive generalized model</td>
              <td>Participant-exclusive generalized model</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>1</td>
              <td>68.36</td>
              <td>82.69</td>
              <td>53.94</td>
            </tr>
            <tr valign="top">
              <td>2</td>
              <td>82.32</td>
              <td>67.12</td>
              <td>81.91</td>
            </tr>
            <tr valign="top">
              <td>3</td>
              <td>99.99</td>
              <td>82.81</td>
              <td>82.81</td>
            </tr>
            <tr valign="top">
              <td>4</td>
              <td>99.90</td>
              <td>82.86</td>
              <td>82.31</td>
            </tr>
            <tr valign="top">
              <td>5</td>
              <td>98.02</td>
              <td>82.94</td>
              <td>74.67</td>
            </tr>
            <tr valign="top">
              <td>6</td>
              <td>99.57</td>
              <td>54.57</td>
              <td>54.03</td>
            </tr>
            <tr valign="top">
              <td>7</td>
              <td>100.00</td>
              <td>82.05</td>
              <td>83.23</td>
            </tr>
            <tr valign="top">
              <td>8</td>
              <td>100.00</td>
              <td>53.72</td>
              <td>53.70</td>
            </tr>
            <tr valign="top">
              <td>9</td>
              <td>100.00</td>
              <td>51.86</td>
              <td>51.83</td>
            </tr>
            <tr valign="top">
              <td>10</td>
              <td>93.69</td>
              <td>82.05</td>
              <td>79.85</td>
            </tr>
            <tr valign="top">
              <td>11</td>
              <td>100.00</td>
              <td>60.86</td>
              <td>62.11</td>
            </tr>
            <tr valign="top">
              <td>12</td>
              <td>98.34</td>
              <td>53.53</td>
              <td>53.60</td>
            </tr>
            <tr valign="top">
              <td>13</td>
              <td>99.81</td>
              <td>53.26</td>
              <td>65.35</td>
            </tr>
            <tr valign="top">
              <td>14</td>
              <td>100.00</td>
              <td>53.47</td>
              <td>53.54</td>
            </tr>
            <tr valign="top">
              <td>15</td>
              <td>85.83</td>
              <td>60.43</td>
              <td>81.91</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>A comparison of <italic>F</italic><sub>1</sub>-score between the personalized and generalized models.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="130"/>
          <col width="230"/>
          <col width="330"/>
          <col width="310"/>
          <thead>
            <tr valign="top">
              <td>Participant</td>
              <td colspan="3"><italic>F</italic><sub>1</sub>-score, %</td>
            </tr>
            <tr valign="top">
              <td>
                <break/>
              </td>
              <td>Personalized model</td>
              <td>Participant-inclusive generalized model</td>
              <td>Participant-exclusive generalized model</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>1</td>
              <td>58.14</td>
              <td>61.91</td>
              <td>23.36</td>
            </tr>
            <tr valign="top">
              <td>2</td>
              <td>58.88</td>
              <td>44.55</td>
              <td>58.53</td>
            </tr>
            <tr valign="top">
              <td>3</td>
              <td>99.98</td>
              <td>62.05</td>
              <td>62.05</td>
            </tr>
            <tr valign="top">
              <td>4</td>
              <td>99.87</td>
              <td>61.95</td>
              <td>61.50</td>
            </tr>
            <tr valign="top">
              <td>5</td>
              <td>96.87</td>
              <td>61.99</td>
              <td>54.74</td>
            </tr>
            <tr valign="top">
              <td>6</td>
              <td>99.35</td>
              <td>24.94</td>
              <td>23.59</td>
            </tr>
            <tr valign="top">
              <td>7</td>
              <td>100.00</td>
              <td>61.16</td>
              <td>62.09</td>
            </tr>
            <tr valign="top">
              <td>8</td>
              <td>100.00</td>
              <td>23.38</td>
              <td>23.29</td>
            </tr>
            <tr valign="top">
              <td>9</td>
              <td>100.00</td>
              <td>22.85</td>
              <td>22.89</td>
            </tr>
            <tr valign="top">
              <td>10</td>
              <td>94.29</td>
              <td>61.04</td>
              <td>59.23</td>
            </tr>
            <tr valign="top">
              <td>11</td>
              <td>100.00</td>
              <td>38.27</td>
              <td>40.15</td>
            </tr>
            <tr valign="top">
              <td>12</td>
              <td>97.40</td>
              <td>26.79</td>
              <td>26.90</td>
            </tr>
            <tr valign="top">
              <td>13</td>
              <td>99.75</td>
              <td>24.47</td>
              <td>44.63</td>
            </tr>
            <tr valign="top">
              <td>14</td>
              <td>100.00</td>
              <td>23.93</td>
              <td>24.09</td>
            </tr>
            <tr valign="top">
              <td>15</td>
              <td>71.28</td>
              <td>38.26</td>
              <td>58.71</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap position="float" id="table4">
        <label>Table 4</label>
        <caption>
          <p>Average accuracy and <italic>F</italic><sub>1</sub>-score of models across all participants.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="500"/>
          <col width="250"/>
          <col width="250"/>
          <thead>
            <tr valign="top">
              <td>Model type</td>
              <td>Accuracy, mean (SD [%])</td>
              <td><italic>F</italic><sub>1</sub>-score, mean (SD [%])</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Personalized</td>
              <td>95.06 (9.24)</td>
              <td>91.72 (15.33)</td>
            </tr>
            <tr valign="top">
              <td>Participant-inclusive generalized</td>
              <td>66.95 (13.76)</td>
              <td>42.50 (17.37)</td>
            </tr>
            <tr valign="top">
              <td>Participant-exclusive generalized</td>
              <td>67.65 (13.48)</td>
              <td>43.05 (17.20)</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap position="float" id="table5">
        <label>Table 5</label>
        <caption>
          <p><italic>P</italic> values of accuracy and <italic>F</italic><sub>1</sub>-score comparisons between model types.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="500"/>
          <col width="250"/>
          <col width="250"/>
          <thead>
            <tr valign="top">
              <td>Model comparison</td>
              <td><italic>P</italic> value for accuracy</td>
              <td><italic>P</italic> value for <italic>F</italic><sub>1</sub>-score</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Personalized versus participant-inclusive generalized</td>
              <td><italic>P</italic>&#60;.001</td>
              <td><italic>P</italic>&#60;.001</td>
            </tr>
            <tr valign="top">
              <td>Personalized versus participant-exclusive generalized</td>
              <td><italic>P</italic>&#60;.001</td>
              <td><italic>P</italic>&#60;.001</td>
            </tr>
            <tr valign="top">
              <td>Participant-inclusive generalized versus participant-exclusive generalized</td>
              <td>.81</td>
              <td>.88</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>We demonstrated that a personalized deep learning model outperforms a generalized model in both the accuracy and <italic>F</italic><sub>1</sub>-score metrics for the 3-class emotion classification task. By establishing two generalized model baselines through the participant-inclusive and participant-exclusive models, we created an alternative approach to the standard generalization technique of separating the training and testing sets by participant, and as a result, we were able to compare personalized and generalized approaches. Our personalized model achieved an accuracy of 95.06% and an <italic>F</italic><sub>1</sub>-score of 91.72%, while our participant-inclusive generalized model achieved an accuracy of 66.95% and an <italic>F</italic><sub>1</sub>-score of 42.50% and our participant-exclusive generalized model achieved an accuracy of 67.65% and an <italic>F</italic><sub>1</sub>-score of 43.05%.</p>
        <p>Our work indicates that personalized models for emotion recognition should be further explored in the realm of health care. Machine learning methods for emotion classification are clearly viable and can achieve high accuracy, as shown by our personalized model. Furthermore, given that numerous wearable technologies collect physiological signals, data acquisition is both straightforward and noninvasive. Combined with the popularity of consumer wearable technology, it is feasible to scale emotion recognition systems. This can ultimately play a major role in the early detection of stress and negative emotions, thus serving as a preventative measure for serious health problems.</p>
      </sec>
      <sec>
        <title>Comparison With Previous Work</title>
        <sec>
          <title>Generalized Models</title>
          <p>The vast majority of prior studies using WESAD developed generalized approaches to the emotion classification task. Schmidt et al [<xref ref-type="bibr" rid="ref18">18</xref>], the pioneers of WESAD, created several feature extraction models and achieved accuracies up to 80% for the 3-class classification task. Huynh et al [<xref ref-type="bibr" rid="ref22">22</xref>] developed a deep neural network, trained on WESAD wrist signals, to outperform past approaches by 8.22%. Albaladejo-González et al [<xref ref-type="bibr" rid="ref36">36</xref>] achieved an <italic>F</italic><sub>1</sub>-score of 88.89% using an unsupervised local outlier factor model and 99.03% using a supervised multilayer perceptron. Additionally, they analyzed the transfer learning capabilities of different models between the WESAD and SWELL-KW (SWELL knowledge work) [<xref ref-type="bibr" rid="ref37">37</xref>] data sets. Ghosh et al [<xref ref-type="bibr" rid="ref38">38</xref>] achieved 94.8% accuracy using WESAD chest data by encoding time-series data into Gramian Angular Field images and employing deep learning techniques. Bajpai et al [<xref ref-type="bibr" rid="ref39">39</xref>] investigated the k-nearest neighbor algorithm to explore the tradeoff between performance and the total number of nearest neighbors using WESAD. Through federated learning, Almadhor et al [<xref ref-type="bibr" rid="ref40">40</xref>] achieved 86.82% accuracy on data in WESAD using a deep neural network. Behinaein et al [<xref ref-type="bibr" rid="ref41">41</xref>] developed a novel transformer approach and achieved state-of-the-art performance using only one modality from WESAD.</p>
        </sec>
        <sec>
          <title>Personalized Models</title>
          <p>Sah and Ghasemzadeh [<xref ref-type="bibr" rid="ref30">30</xref>] developed a generalized approach using a convolutional neural network with 1 modality from WESAD. For the 3-class classification problem, they achieved an average accuracy of 92.85%. They used the leave-one-subject-out (LOSO) analysis to highlight the need for personalization. Indikawati and Winiarti [<xref ref-type="bibr" rid="ref31">31</xref>] directly developed a personalized approach for the 4-class classification problem in WESAD (neutral, stress, amusement, and meditation). Using different feature extraction machine learning models, they achieved accuracies ranging from 88% to 99% for the 15 participants. Liu et al [<xref ref-type="bibr" rid="ref32">32</xref>] developed a federated learning approach using data from WESAD with the goal of preserving user privacy. In doing so, they developed a personalized model as a baseline, which achieved an average accuracy of 90.2%. Nkurikiyeyezu et al [<xref ref-type="bibr" rid="ref42">42</xref>] determined that personalized models (95.2% accuracy) outperform generalized models (42.5% accuracy) for the stress versus no-stress task. By running additional experiments to further understand how personalized models compare to generalized models for the 3-class emotion classification task and by developing participant-inclusive and participant-exclusive versions of the generalized models, our work concretely demonstrates how personalization outperforms generalization and thus supports the conclusions of Nkurikiyeyezu et al [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        </sec>
      </sec>
      <sec>
        <title>Limitations and Future Work</title>
        <p>As shown in <xref ref-type="table" rid="table2">Tables 2</xref> and <xref ref-type="table" rid="table3">3</xref>, the performance of our personalized model deteriorates for participants 1 and 2. To analyze the lack of performance improvement of the personalized model for these 2 participants, we visualized the means and SDs of the different modalities for each emotion class. In <xref rid="figure3" ref-type="fig">Figures 3</xref>-<xref rid="figure5" ref-type="fig">5</xref>, we illustrate notable deviations in modality means and SDs for participants 1 and 2 compared to other participants. While the analysis of these modalities reveals important information about the nature of the WESAD data set, it still remains difficult to pinpoint the exact data set features that caused the performance decline in the personalized model for these 2 participants. This is another limitation: since we do not use a feature extraction model, we cannot assign a feature importance (eg, Gini importance) to individual features like Schmidt et al [<xref ref-type="bibr" rid="ref18">18</xref>] do. We also analyzed the emotion class balances for each participant, which are included in <xref ref-type="table" rid="table6">Table 6</xref>, to see if anomalies existed in the class distributions for certain participants. However, based on the ranges of the class distributions, class balance likely had minimal effect on the performance decline.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Deviations of mean and SD for participants 1 and 2 for neutral class modalities.</p>
          </caption>
          <graphic xlink:href="ai_v3i1e52171_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Deviations of mean and SD for participants 1 and 2 for stress class modalities. EMG: electromyogram.</p>
          </caption>
          <graphic xlink:href="ai_v3i1e52171_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Deviations of mean and SD for participants 1 and 2 for amusement class modalities.</p>
          </caption>
          <graphic xlink:href="ai_v3i1e52171_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Ranges of emotion class distributions per participant.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td>Emotion class</td>
                <td>Range, %</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Neutral</td>
                <td>51.8-54.0</td>
              </tr>
              <tr valign="top">
                <td>Stress</td>
                <td>29.0-31.8</td>
              </tr>
              <tr valign="top">
                <td>Amusement</td>
                <td>16.3-17.4</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>Our participant-inclusive and participant-exclusive generalized models do not outperform previously published generalized models on the WESAD data set (eg, Schmidt et al [<xref ref-type="bibr" rid="ref18">18</xref>] achieved up to 80% accuracy while we achieved 66.95% accuracy with our participant-inclusive model). This discrepancy can be attributed to a deliberate choice in our methodology: instead of maximizing our generalized models’ performance with hyperparameter tuning, we simply opted for a consistent set of hyperparameters across the personalized and generalized models because our primary objective was to evaluate their relative performance. While hyperparameter tuning might yield higher results in practice, differing hyperparameters between our models would introduce additional variables that make it difficult to determine the role that personalization and generalization play in model performance.</p>
        <p>Given the variations between participants, one approach to improving generalized model performance is adding embedding representations for each participant or participant-specific demographic data as additional features as a method of distinguishing individual participants in generalized models. However, to prevent overfitting to participant-specific features like demographic data, data sets with significantly more participants would need to be created, given the small sample size of the WESAD data set.</p>
        <p>One limitation that personalized models may encounter during training is the cold start problem, given that personalized models receive less data than generalized models. Moreover, despite the accuracy improvement in personalized models, developing a model for each participant may be costly and unscalable: data must be labeled specifically per participant, and enough data must be provided to the model to overcome the cold start problem (notably, however, even though the cold start problem should theoretically put our personalized model at a disadvantage, the WESAD data set provided enough data for our personalized model to outperform our generalized model). Both of these limitations can be addressed by a self-supervised learning approach to emotion recognition.</p>
        <p>A self-supervised learning approach follows a framework used by natural language processing models such as the Bidirectional Encoder Representations from Transformers (BERT) model [<xref ref-type="bibr" rid="ref43">43</xref>]. A model first pretrains on a large set of unlabeled data across numerous participants. Then, the pretrained model is fine-tuned to a small amount of labeled, participant-specific data. The pretraining phase eliminates the burden of manual labeling because all data are unlabeled, as well as the cold start problem because large amounts of data can be provided. The fine-tuning phase requires only a small amount of user-specific labeled data to perform accurately, and studies have already begun exploring the tradeoffs between the number of labels and model accuracy in WESAD using self-supervised or semisupervised approaches [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>].</p>
        <p>Finally, to expand beyond the WESAD data set, it is valuable to reproduce results on additional physiological signal data sets for emotion analysis, such as the Database for Emotion Analysis using Physiological Signals (DEAP) [<xref ref-type="bibr" rid="ref46">46</xref>] and Cognitive Load, Affect, and Stress (CLAS) [<xref ref-type="bibr" rid="ref47">47</xref>]. Data from WESAD were collected under controlled laboratory environments, which may not generalize to the real world. Therefore, analyzing emotions in a real-world context through data sets such as K-EmoCon [<xref ref-type="bibr" rid="ref48">48</xref>], which contain physiological data collected in naturalistic conversations, may be useful. Emotions in the K-EmoCon data set were categorized into 18 different classes, so exploring this data set could also help us better assess the benefits of personalization for a broader range of emotions. A major goal of this approach is to provide support for personalized digital interventions for neuropsychiatry, which could benefit a variety of applications, such as video-based digital therapeutics for children with autism to predict the child’s affective state as part of the therapeutic process [<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref52">52</xref>].</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">BERT</term>
          <def>
            <p>Bidirectional Encoder Representations from Transformers</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CLAS</term>
          <def>
            <p>Cognitive Load, Affect, and Stress</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">DEAP</term>
          <def>
            <p>Database for Emotion Analysis using Physiological Signals</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">ECG</term>
          <def>
            <p>electrocardiography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">EDA</term>
          <def>
            <p>electrodermal activity</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">EMG</term>
          <def>
            <p>electromyogram</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">LOSO</term>
          <def>
            <p>leave-one-subject-out</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">PPG</term>
          <def>
            <p>photoplethysmography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">SiLU</term>
          <def>
            <p>sigmoid linear unit</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">SWELL</term>
          <def>
            <p>Smart Reasoning for Well-being at Home and at Work</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">SWELL-KW</term>
          <def>
            <p>SWELL knowledge work</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">WESAD</term>
          <def>
            <p>Wearable Stress and Affect Dataset</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The project described was supported by grant U54GM138062 from the National Institute of General Medical Sciences (NIGMS), a component of the National Institutes of Health (NIH), and its contents are solely the responsibility of the author and do not necessarily represent the official view of NIGMS or NIH. The project was also supported by a grant from the Medical Research Award fund of the Hawai’i Community Foundation (grant MedRes_2023_00002689).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kendler</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Karkowski</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Prescott</surname>
              <given-names>CA</given-names>
            </name>
          </person-group>
          <article-title>Causal relationship between stressful life events and the onset of major depression</article-title>
          <source>Am J Psychiatry</source>
          <year>1999</year>
          <volume>156</volume>
          <issue>6</issue>
          <fpage>837</fpage>
          <lpage>841</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ajp.psychiatryonline.org/doi/full/10.1176/ajp.156.6.837"/>
          </comment>
          <pub-id pub-id-type="doi">10.1176/ajp.156.6.837</pub-id>
          <pub-id pub-id-type="medline">10360120</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chiang</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Turiano</surname>
              <given-names>NA</given-names>
            </name>
            <name name-style="western">
              <surname>Mroczek</surname>
              <given-names>DK</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>GE</given-names>
            </name>
          </person-group>
          <article-title>Affective reactivity to daily stress and 20-year mortality risk in adults with chronic illness: findings from the national study of daily experiences</article-title>
          <source>Health Psychol</source>
          <year>2018</year>
          <volume>37</volume>
          <issue>2</issue>
          <fpage>170</fpage>
          <lpage>178</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/29154603"/>
          </comment>
          <pub-id pub-id-type="doi">10.1037/hea0000567</pub-id>
          <pub-id pub-id-type="medline">29154603</pub-id>
          <pub-id pub-id-type="pii">2017-52072-001</pub-id>
          <pub-id pub-id-type="pmcid">PMC5794509</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leger</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Charles</surname>
              <given-names>ST</given-names>
            </name>
            <name name-style="western">
              <surname>Almeida</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Let it go: lingering negative affect in response to daily stressors is associated with physical health years later</article-title>
          <source>Psychol Sci</source>
          <year>2018</year>
          <volume>29</volume>
          <issue>8</issue>
          <fpage>1283</fpage>
          <lpage>1290</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/29553880"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/0956797618763097</pub-id>
          <pub-id pub-id-type="medline">29553880</pub-id>
          <pub-id pub-id-type="pmcid">PMC6088503</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jorm</surname>
              <given-names>AF</given-names>
            </name>
          </person-group>
          <article-title>Mental health literacy: empowering the community to take action for better mental health</article-title>
          <source>Am Psychol</source>
          <year>2012</year>
          <volume>67</volume>
          <issue>3</issue>
          <fpage>231</fpage>
          <lpage>243</lpage>
          <pub-id pub-id-type="doi">10.1037/a0025957</pub-id>
          <pub-id pub-id-type="medline">22040221</pub-id>
          <pub-id pub-id-type="pii">2011-24866-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mauss</surname>
              <given-names>IB</given-names>
            </name>
            <name name-style="western">
              <surname>Cook</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>JYJ</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Individual differences in cognitive reappraisal: experiential and physiological responses to an anger provocation</article-title>
          <source>Int J Psychophysiol</source>
          <year>2007</year>
          <volume>66</volume>
          <issue>2</issue>
          <fpage>116</fpage>
          <lpage>124</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2007.03.017</pub-id>
          <pub-id pub-id-type="medline">17543404</pub-id>
          <pub-id pub-id-type="pii">S0167-8760(07)00095-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jordan</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>Monin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dweck</surname>
              <given-names>CS</given-names>
            </name>
            <name name-style="western">
              <surname>Lovett</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>John</surname>
              <given-names>OP</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Misery has more company than people think: underestimating the prevalence of others' negative emotions</article-title>
          <source>Pers Soc Psychol Bull</source>
          <year>2011</year>
          <volume>37</volume>
          <issue>1</issue>
          <fpage>120</fpage>
          <lpage>135</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/21177878"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/0146167210390822</pub-id>
          <pub-id pub-id-type="medline">21177878</pub-id>
          <pub-id pub-id-type="pii">37/1/120</pub-id>
          <pub-id pub-id-type="pmcid">PMC4138214</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lane</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Levels of emotional awareness: theory and measurement of a socio-emotional skill</article-title>
          <source>J Intell</source>
          <year>2021</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>42</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jintelligence9030042"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jintelligence9030042</pub-id>
          <pub-id pub-id-type="medline">34449662</pub-id>
          <pub-id pub-id-type="pii">jintelligence9030042</pub-id>
          <pub-id pub-id-type="pmcid">PMC8395748</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>el Kaliouby</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Baron-Cohen</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Affective computing and autism</article-title>
          <source>Ann N Y Acad Sci</source>
          <year>2006</year>
          <volume>1093</volume>
          <fpage>228</fpage>
          <lpage>248</lpage>
          <pub-id pub-id-type="doi">10.1196/annals.1382.016</pub-id>
          <pub-id pub-id-type="medline">17312261</pub-id>
          <pub-id pub-id-type="pii">1093/1/228</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>D'Alfonso</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lederman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Bucci</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Berry</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The digital therapeutic alliance and human-computer interaction</article-title>
          <source>JMIR Ment Health</source>
          <year>2020</year>
          <volume>7</volume>
          <issue>12</issue>
          <fpage>e21895</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mental.jmir.org/2020/12/e21895/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/21895</pub-id>
          <pub-id pub-id-type="medline">33372897</pub-id>
          <pub-id pub-id-type="pii">v7i12e21895</pub-id>
          <pub-id pub-id-type="pmcid">PMC7803473</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>A review of and roadmap for data science and machine learning for the neuropsychiatric phenotype of autism</article-title>
          <source>Annu Rev Biomed Data Sci</source>
          <year>2023</year>
          <volume>6</volume>
          <fpage>211</fpage>
          <lpage>228</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.annualreviews.org/doi/abs/10.1146/annurev-biodatasci-020722-125454?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1146/annurev-biodatasci-020722-125454</pub-id>
          <pub-id pub-id-type="medline">37137169</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Srivastava</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Voss</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kline</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Varma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tariq</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Kalantarian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Patnaik</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chrisman</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Stockham</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Paskov</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Data-driven diagnostics and the potential of mobile artificial intelligence for digital therapeutic phenotyping in computational psychiatry</article-title>
          <source>Biol Psychiatry Cogn Neurosci Neuroimaging</source>
          <year>2020</year>
          <volume>5</volume>
          <issue>8</issue>
          <fpage>759</fpage>
          <lpage>769</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32085921"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.bpsc.2019.11.015</pub-id>
          <pub-id pub-id-type="medline">32085921</pub-id>
          <pub-id pub-id-type="pii">S2451-9022(19)30340-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC7292741</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Voss</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Daniels</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kline</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tariq</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Robinson</surname>
              <given-names>TN</given-names>
            </name>
            <name name-style="western">
              <surname>Desai</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Feinstein</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Winograd</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Effect of wearable digital intervention for improving socialization in children with autism spectrum disorder: a randomized clinical trial</article-title>
          <source>JAMA Pediatr</source>
          <year>2019</year>
          <volume>173</volume>
          <issue>5</issue>
          <fpage>446</fpage>
          <lpage>454</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30907929"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamapediatrics.2019.0285</pub-id>
          <pub-id pub-id-type="medline">30907929</pub-id>
          <pub-id pub-id-type="pii">2728462</pub-id>
          <pub-id pub-id-type="pmcid">PMC6503634</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Voss</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kline</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Daniels</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fazel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Feinstein</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Winograd</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>SuperpowerGlass: a wearable aid for the at-home therapy of children with autism</article-title>
          <source>Proc ACM Interact Mob Wearable Ubiquitous Technol</source>
          <year>2017</year>
          <volume>1</volume>
          <issue>3</issue>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.1145/3130977</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rainville</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bechara</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Naqvi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Damasio</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>Basic emotions are associated with distinct patterns of cardiorespiratory activity</article-title>
          <source>Int J Psychophysiol</source>
          <year>2006</year>
          <volume>61</volume>
          <issue>1</issue>
          <fpage>5</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2005.10.024</pub-id>
          <pub-id pub-id-type="medline">16439033</pub-id>
          <pub-id pub-id-type="pii">S0167-8760(05)00280-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nummenmaa</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Glerean</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Hari</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hietanen</surname>
              <given-names>JK</given-names>
            </name>
          </person-group>
          <article-title>Bodily maps of emotions</article-title>
          <source>Proc Natl Acad Sci U S A</source>
          <year>2014</year>
          <volume>111</volume>
          <issue>2</issue>
          <fpage>646</fpage>
          <lpage>651</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.pnas.org/doi/abs/10.1073/pnas.1321664111?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1073/pnas.1321664111</pub-id>
          <pub-id pub-id-type="medline">24379370</pub-id>
          <pub-id pub-id-type="pii">1321664111</pub-id>
          <pub-id pub-id-type="pmcid">PMC3896150</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jang</surname>
              <given-names>EH</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Sohn</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Analysis of physiological signals for recognition of boredom, pain, and surprise emotions</article-title>
          <source>J Physiol Anthropol</source>
          <year>2015</year>
          <volume>34</volume>
          <issue>1</issue>
          <fpage>25</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jphysiolanthropol.biomedcentral.com/articles/10.1186/s40101-015-0063-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s40101-015-0063-5</pub-id>
          <pub-id pub-id-type="medline">26084816</pub-id>
          <pub-id pub-id-type="pii">10.1186/s40101-015-0063-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC4490654</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Domínguez-Jiménez</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Campo-Landines</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Martínez-Santos</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Delahoz</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Contreras-Ortiz</surname>
              <given-names>SH</given-names>
            </name>
          </person-group>
          <article-title>A machine learning model for emotion recognition from physiological signals</article-title>
          <source>Biomed Signal Process Control</source>
          <year>2020</year>
          <volume>55</volume>
          <fpage>101646</fpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2019.101646</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Reiss</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Duerichen</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Marberger</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Van Laerhoven</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Introducing WESAD, a multimodal dataset for wearable stress and affect detection</article-title>
          <year>2018</year>
          <conf-name>ICMI '18: Proceedings of the 20th ACM International Conference on Multimodal Interaction</conf-name>
          <conf-date>October 16-20, 2018</conf-date>
          <conf-loc>Boulder, CO</conf-loc>
          <fpage>400</fpage>
          <lpage>408</lpage>
          <pub-id pub-id-type="doi">10.1145/3242969.3242985</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>XS</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Virk</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>An emotion recognition system based on physiological signals obtained by wearable sensors</article-title>
          <source>Wearable Sensors and Robots: Proceedings of International Conference on Wearable Sensors and Robots 2015</source>
          <year>2017</year>
          <publisher-loc>Singapore</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>15</fpage>
          <lpage>25</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ramzan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dawn</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Fused CNN-LSTM deep learning emotion recognition model using electroencephalography signals</article-title>
          <source>Int J Neurosci</source>
          <year>2023</year>
          <volume>133</volume>
          <issue>6</issue>
          <fpage>587</fpage>
          <lpage>597</lpage>
          <pub-id pub-id-type="doi">10.1080/00207454.2021.1941947</pub-id>
          <pub-id pub-id-type="medline">34121598</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vijayakumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Flynn</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Murray</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>A comparative study of machine learning techniques for emotion recognition from peripheral physiological signals</article-title>
          <year>2020</year>
          <conf-name>2020 31st Irish Signals and Systems Conference (ISSC)</conf-name>
          <conf-date>June 11-12, 2020</conf-date>
          <conf-loc>Letterkenny, Ireland</conf-loc>
          <pub-id pub-id-type="doi">10.1109/issc49989.2020.9180193</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huynh</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pirttikangas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Siirtola</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>StressNAS: affect state and stress detection using neural architecture search</article-title>
          <year>2021</year>
          <conf-name>UbiComp/ISWC '21 Adjunct: Adjunct Proceedings of the 2021 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2021 ACM International Symposium on Wearable Computers</conf-name>
          <conf-date>September 21-26, 2021</conf-date>
          <conf-loc>Virtual</conf-loc>
          <fpage>121</fpage>
          <lpage>125</lpage>
          <pub-id pub-id-type="doi">10.1145/3460418.3479320</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hsieh</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>YT</given-names>
            </name>
            <name name-style="western">
              <surname>Beh</surname>
              <given-names>WK</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>AYA</given-names>
            </name>
          </person-group>
          <article-title>Feature selection framework for XGBoost based on electrodermal activity in stress detection</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE International Workshop on Signal Processing Systems (SiPS)</conf-name>
          <conf-date>October 20-23, 2019</conf-date>
          <conf-loc>Nanjing, China</conf-loc>
          <pub-id pub-id-type="doi">10.1109/sips47522.2019.9020321</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garg</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Santhosh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dengel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ishimaru</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Stress detection by machine learning and wearable sensors</article-title>
          <year>2021</year>
          <conf-name>IUI '21 Companion: 26th International Conference on Intelligent User Interfaces - Companion</conf-name>
          <conf-date>April 14-17, 2021</conf-date>
          <conf-loc>College Station, TX</conf-loc>
          <fpage>43</fpage>
          <lpage>45</lpage>
          <pub-id pub-id-type="doi">10.1145/3397482.3450732</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yanushkevich</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Shmerko</surname>
              <given-names>VP</given-names>
            </name>
          </person-group>
          <article-title>Intelligent stress monitoring assistant for first responders</article-title>
          <source>IEEE Access</source>
          <year>2021</year>
          <volume>9</volume>
          <fpage>25314</fpage>
          <lpage>25329</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/9348878"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/access.2021.3057578</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siirtola</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Continuous stress detection using the sensors of commercial smartwatch</article-title>
          <year>2019</year>
          <conf-name>UbiComp/ISWC '19 Adjunct: Adjunct Proceedings of the 2019 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2019 ACM International Symposium on Wearable Computers</conf-name>
          <conf-date>September 9-13, 2019</conf-date>
          <conf-loc>London, United Kingdom</conf-loc>
          <fpage>1198</fpage>
          <lpage>1201</lpage>
          <pub-id pub-id-type="doi">10.1145/3341162.3344831</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bobade</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Vani</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Stress detection with machine learning and deep learning using multimodal physiological data</article-title>
          <year>2020</year>
          <conf-name>2020 Second International Conference on Inventive Research in Computing Applications (ICIRCA)</conf-name>
          <conf-date>July 15-17, 2020</conf-date>
          <conf-loc>Coimbatore, India</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icirca48905.2020.9183244</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Hierarchical deep neural network for mental stress state detection using IoT based biomarkers</article-title>
          <source>Pattern Recognit Lett</source>
          <year>2021</year>
          <volume>145</volume>
          <fpage>81</fpage>
          <lpage>87</lpage>
          <pub-id pub-id-type="doi">10.1016/j.patrec.2021.01.030</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Reiss</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dürichen</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Van Laerhoven</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Wearable-based affect recognition: a review</article-title>
          <source>Sensors (Basel)</source>
          <year>2019</year>
          <volume>19</volume>
          <issue>19</issue>
          <fpage>4079</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s19194079"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s19194079</pub-id>
          <pub-id pub-id-type="medline">31547220</pub-id>
          <pub-id pub-id-type="pii">s19194079</pub-id>
          <pub-id pub-id-type="pmcid">PMC6806301</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sah</surname>
              <given-names>RK</given-names>
            </name>
            <name name-style="western">
              <surname>Ghasemzadeh</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Stress classification and personalization: getting the most out of the least</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on July 12, 2021</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2107.05666</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Indikawati</surname>
              <given-names>FI</given-names>
            </name>
            <name name-style="western">
              <surname>Winiarti</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Stress detection from multimodal wearable sensor data</article-title>
          <source>IOP Conf Ser Mater Sci Eng</source>
          <year>2020</year>
          <volume>771</volume>
          <issue>1</issue>
          <fpage>012028</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://iopscience.iop.org/article/10.1088/1757-899X/771/1/012028/pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.1088/1757-899X/771/1/012028</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Goetz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tewari</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Learning from others without sacrificing privacy: simulation comparing centralized and federated machine learning on mobile health data</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2021</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>e23728</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2021/3/e23728/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/23728</pub-id>
          <pub-id pub-id-type="medline">33783362</pub-id>
          <pub-id pub-id-type="pii">v9i3e23728</pub-id>
          <pub-id pub-id-type="pmcid">PMC8044739</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>A survey on physiological signal-based emotion recognition</article-title>
          <source>Bioengineering (Basel)</source>
          <year>2022</year>
          <volume>9</volume>
          <issue>11</issue>
          <fpage>688</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bioengineering9110688"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bioengineering9110688</pub-id>
          <pub-id pub-id-type="medline">36421089</pub-id>
          <pub-id pub-id-type="pii">bioengineering9110688</pub-id>
          <pub-id pub-id-type="pmcid">PMC9687364</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ronneberger</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Fischer</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Brox</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Navab</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hornegger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wells</surname>
              <given-names>WM</given-names>
            </name>
            <name name-style="western">
              <surname>Frangi</surname>
              <given-names>AF</given-names>
            </name>
          </person-group>
          <article-title>U-net: convolutional networks for biomedical image segmentation</article-title>
          <source>Medical Image Computing and Computer-Assisted Intervention – MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III</source>
          <year>2015</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>234</fpage>
          <lpage>241</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Paszke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Massa</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Lerer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bradbury</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chanan</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Killeen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gimelshein</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Antiga</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Desmaison</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kopf</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>DeVito</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Raison</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tejani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chilamkurthy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Steiner</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>PyTorch: an imperative style, high-performance deep learning library</article-title>
          <year>2019</year>
          <conf-name>33rd Conference on Neural Information Processing Systems (NeurIPS 2019)</conf-name>
          <conf-date>December 8-14, 2019</conf-date>
          <conf-loc>Vancouver, BC, Canada</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Albaladejo-González</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ruipérez-Valiente</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez Mármol</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Evaluating different configurations of machine learning models and their transfer learning capabilities for stress detection using heart rate</article-title>
          <source>J Ambient Intell Humaniz Comput</source>
          <year>2023</year>
          <volume>14</volume>
          <issue>8</issue>
          <fpage>11011</fpage>
          <lpage>11021</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/article/10.1007/s12652-022-04365-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s12652-022-04365-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koldijk</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sappelli</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Verberne</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Neerincx</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Kraaij</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>The SWELL knowledge work dataset for stress and user modeling research</article-title>
          <year>2014</year>
          <conf-name>ICMI '14: Proceedings of the 16th International Conference on Multimodal Interaction</conf-name>
          <conf-date>November 12-16, 2014</conf-date>
          <conf-loc>Istanbul, Turkey</conf-loc>
          <fpage>291</fpage>
          <lpage>298</lpage>
          <pub-id pub-id-type="doi">10.1145/2663204.2663257</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ijaz</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmud</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Classification of mental stress from wearable physiological sensors using image-encoding-based deep neural network</article-title>
          <source>Biosensors (Basel)</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>12</issue>
          <fpage>1153</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bios12121153"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bios12121153</pub-id>
          <pub-id pub-id-type="medline">36551120</pub-id>
          <pub-id pub-id-type="pii">bios12121153</pub-id>
          <pub-id pub-id-type="pmcid">PMC9775098</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bajpai</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Evaluating KNN performance on WESAD dataset</article-title>
          <year>2020</year>
          <conf-name>2020 12th International Conference on Computational Intelligence and Communication Networks (CICN)</conf-name>
          <conf-date>September 25-26, 2020</conf-date>
          <conf-loc>Bhimtal, India</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cicn49253.2020.9242568</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Almadhor</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sampedro</surname>
              <given-names>GA</given-names>
            </name>
            <name name-style="western">
              <surname>Abisado</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abbas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Baili</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Wrist-based electrodermal activity monitoring for stress detection using federated learning</article-title>
          <source>Sensors (Basel)</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>8</issue>
          <fpage>3984</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s23083984"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s23083984</pub-id>
          <pub-id pub-id-type="medline">37112323</pub-id>
          <pub-id pub-id-type="pii">s23083984</pub-id>
          <pub-id pub-id-type="pmcid">PMC10146352</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Behinaein</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bhatti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rodenburg</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Hungler</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Etemad</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A transformer architecture for stress detection from ECG</article-title>
          <year>2021</year>
          <conf-name>ISWC '21: Proceedings of the 2021 ACM International Symposium on Wearable Computers</conf-name>
          <conf-date>September 21-26, 2021</conf-date>
          <conf-loc>Virtual</conf-loc>
          <fpage>132</fpage>
          <lpage>134</lpage>
          <pub-id pub-id-type="doi">10.1145/3460421.3480427</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nkurikiyeyezu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yokokubo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lopez</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>The effect of person-specific biometrics in improving generic stress predictive models</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on December 31 2019</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1910.01770</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Devlin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Toutanova</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>BERT: pre-training of deep bidirectional transformers for language understanding</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on May 24 2019</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1810.04805</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Sarkar</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Semi-supervised generative adversarial network for stress detection using partially labeled physiological data</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on October 27 2022</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2206.14976</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Islam</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Personalized prediction of recurrent stress events using self-supervised learning on multimodal time-series data</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on July 07 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2307.03337</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koelstra</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Muhl</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Soleymani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Yazdani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ebrahimi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Pun</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nijholt</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Patras</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>DEAP: a database for emotion analysis using physiological signals</article-title>
          <source>IEEE Trans Affect Comput</source>
          <year>2012</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>18</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.1109/t-affc.2011.15</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Markova</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ganchev</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kalinkov</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>CLAS: a database for cognitive load, affect and stress recognition</article-title>
          <year>2019</year>
          <conf-name>2019 International Conference on Biomedical Innovations and Applications (BIA)</conf-name>
          <conf-date>November 8-9, 2019</conf-date>
          <conf-loc>Varna, Bulgaria</conf-loc>
          <pub-id pub-id-type="doi">10.1109/bia48344.2019.8967457</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Cha</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Khandoker</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>Hadjileontiadis</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>U</given-names>
            </name>
          </person-group>
          <article-title>K-EmoCon, a multimodal sensor dataset for continuous emotion recognition in naturalistic conversations</article-title>
          <source>Sci Data</source>
          <year>2020</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>293</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41597-020-00630-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41597-020-00630-y</pub-id>
          <pub-id pub-id-type="medline">32901038</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41597-020-00630-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC7479607</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Daniels</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>JN</given-names>
            </name>
            <name name-style="western">
              <surname>Voss</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Fazel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kline</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Feinstein</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Winograd</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Exploratory study examining the at-home feasibility of a wearable tool for social-affective learning in children with autism</article-title>
          <source>NPJ Digit Med</source>
          <year>2018</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>32</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-018-0035-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-018-0035-3</pub-id>
          <pub-id pub-id-type="medline">31304314</pub-id>
          <pub-id pub-id-type="pii">35</pub-id>
          <pub-id pub-id-type="pmcid">PMC6550272</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Daniels</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Voss</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tamura</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fazel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kline</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Winograd</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Feinstein</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Feasibility testing of a wearable behavioral aid for social learning in children with autism</article-title>
          <source>Appl Clin Inform</source>
          <year>2018</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>129</fpage>
          <lpage>140</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/29466819"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0038-1626727</pub-id>
          <pub-id pub-id-type="medline">29466819</pub-id>
          <pub-id pub-id-type="pmcid">PMC5821509</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kalantarian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jedoui</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tariq</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Dunlap</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Labeling images with facial emotion and the potential for pediatric healthcare</article-title>
          <source>Artif Intell Med</source>
          <year>2019</year>
          <volume>98</volume>
          <fpage>77</fpage>
          <lpage>86</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(18)30259-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2019.06.004</pub-id>
          <pub-id pub-id-type="medline">31521254</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(18)30259-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC6855300</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kalantarian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jedoui</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>A mobile game for automatic emotion-labeling of images</article-title>
          <source>IEEE Trans Games</source>
          <year>2020</year>
          <volume>12</volume>
          <issue>2</issue>
          <fpage>213</fpage>
          <lpage>218</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32551410"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/tg.2018.2877325</pub-id>
          <pub-id pub-id-type="medline">32551410</pub-id>
          <pub-id pub-id-type="pmcid">PMC7301713</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
