<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR AI</journal-id>
      <journal-title>JMIR AI</journal-title>
      <issn pub-type="epub">2817-1705</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v2i1e42337</article-id>
      <article-id pub-id-type="pmid">38875548</article-id>
      <article-id pub-id-type="doi">10.2196/42337</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>A Trainable Open-Source Machine Learning Accelerometer Activity Recognition Toolbox: Deep Learning Approach</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>El Emam</surname>
            <given-names>Khaled</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Malin</surname>
            <given-names>Bradley</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Li</surname>
            <given-names>Hong</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lim</surname>
            <given-names>Gilbert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Aqajari</surname>
            <given-names>Seyed Amir Hossein</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Wang</surname>
            <given-names>Yuli</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Wieland</surname>
            <given-names>Fluri</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Health Science</institution>
            <institution>Institute of Sports Science</institution>
            <institution>University of Bern</institution>
            <addr-line>Bremgartenstrasse 145</addr-line>
            <addr-line>Bern, 3012</addr-line>
            <country>Switzerland</country>
            <phone>41 787347220</phone>
            <email>flu.wieland@gmail.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2129-7726</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Nigg</surname>
            <given-names>Claudio</given-names>
          </name>
          <degrees>BSc, MSc, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2897-4689</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Health Science</institution>
        <institution>Institute of Sports Science</institution>
        <institution>University of Bern</institution>
        <addr-line>Bern</addr-line>
        <country>Switzerland</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Fluri Wieland <email>flu.wieland@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>8</day>
        <month>6</month>
        <year>2023</year>
      </pub-date>
      <volume>2</volume>
      <elocation-id>e42337</elocation-id>
      <history>
        <date date-type="received">
          <day>21</day>
          <month>9</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>21</day>
          <month>12</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>28</day>
          <month>2</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>22</day>
          <month>4</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Fluri Wieland, Claudio Nigg. Originally published in JMIR AI (https://ai.jmir.org), 08.06.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on https://www.ai.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://ai.jmir.org/2023/1/e42337" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The accuracy of movement determination software in current activity trackers is insufficient for scientific applications, and the software is also not open source.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>To address this issue, we developed an accurate, trainable, and open-source smartphone-based activity-tracking toolbox that consists of an Android app (<italic>HumanActivityRecorder</italic>) and 2 different deep learning algorithms that can be adapted to new behaviors.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We employed a semisupervised deep learning approach to identify the different classes of activity based on accelerometry and gyroscope data, using both our own data and open competition data.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Our approach is robust against variation in sampling rate and sensor dimensional input and achieved an accuracy of around 87% in classifying 6 different behaviors on both our own recorded data and the MotionSense data. However, when the dimension-adaptive neural architecture model is tested on our own data, its accuracy drops to 26%, demonstrating the superiority of our algorithm, which achieves 63% on the MotionSense data used to train the dimension-adaptive neural architecture model.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p><italic>HumanActivityRecorder</italic> is a versatile, retrainable, open-source, and accurate toolbox that is continually tested on new data. This enables researchers to adapt to the behavior being measured and achieve repeatability in scientific studies.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>activity classification</kwd>
        <kwd>deep learning</kwd>
        <kwd>accelerometry</kwd>
        <kwd>open source</kwd>
        <kwd>activity recognition</kwd>
        <kwd>machine learning</kwd>
        <kwd>activity recorder</kwd>
        <kwd>digital health application</kwd>
        <kwd>smartphone app</kwd>
        <kwd>deep learning algorithm</kwd>
        <kwd>sensor device</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>The last decade has seen a significant increase in worldwide smartphone ownership [<xref ref-type="bibr" rid="ref1">1</xref>], with approximately half of the world’s population now owning a smartphone and a device penetration rate of 80% in Germany and the United Kingdom [<xref ref-type="bibr" rid="ref2">2</xref>]. Even low-end smartphones are equipped with various sensors, including accelerometers, gyroscopes, proximity sensors, magnetometers, and GPS receivers, along with energy-efficient processors and stable internet connections. With the advent of smartphones and wearables, physical activity analysis has greatly gained in popularity. Accelerometry-based behavior analysis has a variety of applications, such as fall detection in older patients [<xref ref-type="bibr" rid="ref3">3</xref>], health monitoring [<xref ref-type="bibr" rid="ref4">4</xref>], work-related stress analysis [<xref ref-type="bibr" rid="ref5">5</xref>], and sleep analysis [<xref ref-type="bibr" rid="ref6">6</xref>]. The widespread use of accelerometry in everyday smartphone apps has reduced the cost of gyroscope and accelerometer sensors, which has in turn accelerated their development. While wearables have gained popularity as accelerometer devices, smartphones still make up the majority of them.</p>
        <p>Many studies have shown the accuracy and reliability of smartphone sensors in accelerometry [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. Although wearables tend to provide more accurate behavior classifications, the potential of using smartphones far outweighs the additional accuracy gained from wearables. Although they are more precise thus far [<xref ref-type="bibr" rid="ref10">10</xref>], the cost of wearables for larger study populations is very high, compared with the widespread popularity and affordability of smartphones, making them a more accessible option for research. Additionally, smartphone apps are easier to distribute, update, configure, and adapt to specific research questions than wearables. Wearables also have the disadvantage of limited software support and closed-source software, making research based on previous software nonreproducible after algorithm updates. This means that wearables bought for research purposes must be replaced on a regular basis.</p>
        <p>Most importantly, however, the default software of wearable manufacturers is in almost all cases not open-source, meaning that after each change of the algorithm (ie, app update) that classifies behavior, research based on previous software is not reproducible anymore. Furthermore, in most cases, charges apply for the use of the said software. On the other hand, some smartphone manufacturers offer free, open-source toolboxes for movement activity recognition, such as Samsung and Huawei. However, these toolboxes only recognize a limited number of activity types and are at the time of writing not trainable to new activities. The purpose of both, however, is for them to be integrated into applications, so they can be used to determine whether a smartphone user is moving and is active or not, in order to interact with application functionality, such as energy saving while not moving, clocking active hours, or encouraging movement when a user is inactive. While data can be collected and stored, the behavior classes are fixed and neither trainable nor retrainable. To address these limitations, the scientific community needs access to an open-source, adaptable behavior analysis toolbox that also facilitates reproducible research and is adaptable to specific research questions. To fulfil this need, we present our open-source, deep learning–based behavior analysis toolbox. Our Human Activity Analysis toolbox includes a proprietary Android app, 2 deep learning algorithms, scripts to process data, and a continually expanding sample data set. The toolbox has been validated with a sample of 68 University of Bern students and employees.</p>
      </sec>
      <sec>
        <title>Activity Recognition and Deep Learning Background</title>
        <p>Deep learning algorithms have gained importance in classifying human behavior based on sensor data collected from accelerometers, gyroscopes, and magnetometers [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref18">18</xref>] (for a deeper understanding and comprehensive overview, see [<xref ref-type="bibr" rid="ref19">19</xref>]). These algorithms are based on artificial neural networks, and specifically, deep neural networks (DNNs) have become the dominant approach for activity recognition as of 2022. DNNs consist of multiple layers of neurons of similar or different types, and the functionality of these neurons is determined by the nature of the layers and the way they are interconnected [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. It is important to note that a standard neural network consists of many simple, connected processors called neurons, each producing a sequence of real-valued activations. Depending on the problem and how the neurons are connected, such behavior may require long causal chains of computational stages. Thus, if multiple layers of neurons are used sequentially, we speak of DNNs [<xref ref-type="bibr" rid="ref20">20</xref>].</p>
        <p>Most DNN architectures consist of a convolutional neural network (CNN) layer, followed by either a feedforward neural network (FNN) layer or a recurrent neural network (RNN) layer. Unlike the output from an RNN neuron, which is fed back into the same layer, the output from an FNN neuron is only connected to the next layer. CNNs handle variable input dimensions quite well and are mainly used for feature extraction for the RNN or FNN layer, which, combined with a prior CNN, output a better generalization than if fed with raw sensor data [<xref ref-type="bibr" rid="ref22">22</xref>]. However, FNNs only work well with data of the same input dimensions, and RNNs only work with a fixed number of streams. As a result, the widely used CNN-RNN-FNN combinations do not work with varying input dimensions. This means that if data collection from one sensor stops, the movement type cannot be classified by the DNN that was trained on multiple input dimensions. In order to save battery life in smartphones during long-term recordings, it is often desirable to temporarily disable certain sensors or to vary the sampling rate of sensors, which results in changing the input dimensions for the DNN.</p>
        <p>When a participant is sitting for an extended period, disabling the gyroscope sensor can conserve battery life. This is because the rotational position is unlikely to change significantly without significant acceleration changes unless the person is in an aircraft and the gravitational acceleration is being compensated for in the data. To determine when the activity type changes, a very low recording frequency suffices, so it is desirable to deactivate the gyroscope and magnetometer and lower the accelerometer recording frequency significantly. Dummy data can be generated to compensate for missing data in order to maintain the accuracy of the trained CNN-FNN-RNN model [<xref ref-type="bibr" rid="ref23">23</xref>]. However, this approach can result in a loss of accuracy in classification. Another solution is to insert a global pooling layer [<xref ref-type="bibr" rid="ref24">24</xref>], but this also leads to a reduction in accuracy.</p>
        <p>Previous publications on accelerometry-based movement recognition have shown great success but significant limitations. Ordóñez and Roggen [<xref ref-type="bibr" rid="ref15">15</xref>] presented a deep-CNN–based framework, which they tested against models such as decision tree, random forest, and support vector machines. Trained and then tested on a data set, the accuracy reached up to 86.7%. The authors then analyzed which component of the data had the biggest impact on classification accuracy and determined this to be changes in acceleration, which is in line with our own results.</p>
        <p>Wang et al [<xref ref-type="bibr" rid="ref11">11</xref>] offer a comprehensive survey of recent advancements in activity recognition and associated methodologies. Their work sheds light on the various strengths and weaknesses of deep learning models when it comes to activity classification. Although most models perform accurately on their trained data [<xref ref-type="bibr" rid="ref25">25</xref>], significant limitations remain. First, the lack of extensive, labeled accelerometry data sets limits their efficacy. Second, the generalization capabilities of models need improvement. Third, models struggle with sensor noise and input variability, highlighting a need for greater robustness. Our algorithms aim to address these issues, working to mitigate the associated limitations and enhance overall model performance. To achieve this, we build upon previous research by incorporating and improving upon their methodologies while also introducing our own additional data set for algorithm training.</p>
        <p>Malekzadeh et al [<xref ref-type="bibr" rid="ref26">26</xref>] proposed a new model, which tries to counteract the aforementioned shortcomings by introducing a <italic>dimension-adaptive pooling</italic> (DAP) layer, which makes DNNs robust to changes in not only sampling rates but also dimensional changes of the data due to varying sensor availability.</p>
        <p>The authors also introduced a <italic>dimension-adaptive training</italic> layer, and combined it with the classical CNN-FNN-RNN approach and the DAP layer. They claim that dimension-adaptive neural architecture (DANA) can prevent losses in classification accuracy, even under varying sensor availability and temporal sampling rate changes. This model was tested on 4 publicly available data sets, including the MotionSense [<xref ref-type="bibr" rid="ref27">27</xref>] data set, which consists of accelerometer data from 24 students at Queen Mary University of London.</p>
        <p>Our goal was to not only implement this model into our own DNN, but also to improve upon it and validate it using our own data. The robustness of the DANA model is very promising, making it a valuable addition to our research.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Ethical Considerations</title>
        <p>According to the guidelines stated on the Ethics Commission page of the University of Bern's Faculty of Human Sciences, no ethics committee approval was required for this research. This conclusion is based on the fact that all data was collected with participants' informed consent, the data collection was conducted anonymously, and the research activities only involved non-hazardous tasks such as standing, sitting, walking, and ascending or descending stairs. No personal data was collected.</p>
      </sec>
      <sec>
        <title>Training Data</title>
        <p>The data used for the initial training of the neural network was gathered from the MotionSense Github repository. These data consist of accelerometer and gyroscope readings from an iPhone 6s (Apple Inc), collected at a frequency of 50 Hz by 24 participants who followed a set of actions on the campus of Queen Mary University of London. These actions included ascending or descending stairs, sitting, walking, standing, and jogging (<xref rid="figure1" ref-type="fig">Figure 1</xref>). The data recorded gravity, acceleration, rotation, and attitude on 3 axes.</p>
        <p>After conducting a principal component analysis, we found that the X, Y, and Z acceleration and rotational changes were the most predictive factors in classifying the participant’s behavior (<xref rid="figure2" ref-type="fig">Figure 2</xref>). Therefore, only these 6 values were used in the training of the algorithm. As a result, our app only records these 6 values, which are then used for further analysis.</p>
        <p>To gather more data and validate our model, we set up our own course of action on the campus of the Centre for Sports Science at the University of Bern, modeled after the course used at Queen Mary University. A total of 68 participants (aged 21-59, median 26, SD 3.2 years), who were students and employees of the University of Bern, completed the course while our <italic>HumanActivityRecorder</italic> Android app (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) was running and collecting data. All participants were fully informed about the task and gave their consent for the data collection.</p>
        <p>The course consisted of approximately 300 seconds of walking, jogging, sitting, and walking up and down stairs and standing still (<xref rid="figure3" ref-type="fig">Figure 3</xref>). All participants completed all segments of the course, and the corresponding data segments were manually labeled for use in training the models.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Course for accelerometer data collection on the campus of the Queen Mary University of London for the MotionSense data set; graph from Malekzadeh et al [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
          </caption>
          <graphic xlink:href="ai_v2i1e42337_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Data example of the MotionSense data set. Note that some values do not change significantly when normalized over the course of recording and are therefore of lesser interest for the prediction of behavior.</p>
          </caption>
          <graphic xlink:href="ai_v2i1e42337_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Course on the premises of the University of Bern. Participants followed the indicated path, starting walking, followed by jogging, sitting, ascending stairs, standing, and descending stairs. Completion took an average of approximately 300 seconds.</p>
          </caption>
          <graphic xlink:href="ai_v2i1e42337_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The participants completed the course in 2 groups with different instructions. Group 1 (n=29, median age 26, SD 5.2 years) was instructed to wear the smartphone in their preferred manner. Group 2 (n=39, median age 27, SD 4.7 years) wore the smartphone in the right front trousers’ pocket, with the display facing toward the body and the top of the phone pointing down while standing. This placement is consistent with the data collection method used for the <italic>MotionSense</italic> data set, as discussed above. It was found that the orientation of the smartphone has a significant impact on the performance of the model. To ensure consistency and comparability between the data sets, our algorithm was trained on the data of group 2, as wearing the smartphone in an individually preferred manner (group 1) resulted in significantly worse performance in classification accuracy. For a detailed comparison of classification accuracy between groups 1 and 2, please refer to <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
      </sec>
      <sec>
        <title>App</title>
        <p>The accelerometer and gyroscope data were collected using our custom-made <italic>HumanActivityRecorder</italic> Android app, which was developed using Android Studio 4.1 with Java 1.8.0_271 (<xref rid="figure4" ref-type="fig">Figure 4</xref>). The app records accelerometer and gyroscope data at a sampling rate of 50 Hz and is publicly available on the Google Play Store as version 13 of the <italic>HumanActivityRecorder</italic> app. The accelerometer data are recorded in the x-, y-, and z-axes, while the gyroscope data consist of rotation around these axes (roll, pitch, and yaw) at the same frequency. The data are then automatically sent to a server and can be downloaded as a CSV or JSON file. The source code is available on Github [<xref ref-type="bibr" rid="ref28">28</xref>]. The app is compatible with Android 5.0 and later versions. We used an Honor View 20 smartphone for data collection to ensure consistency in recording. Only 1 device was used.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Comparison of the models used in our study. The dimension-adaptive neural architecture (DANA) model consists of several additional layers, which we found did not improve the classification of our data. Note that in our simplified model, the dimension-adaptive pooling (DAP) layer has been omitted as well, since our data are dimensionally consistent. LSTM: Long short-term memory.</p>
          </caption>
          <graphic xlink:href="ai_v2i1e42337_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Recording</title>
        <p>Before beginning the data collection process, the participants were asked for their name, age, and consent. The data collection paradigm was explained to them and demonstrated through a walk-through by the data collector. The participants then completed the course, which included walking, jogging, sitting, ascending and descending stairs, and standing still, while the app recorded their accelerometer and gyroscope data. After completing the course, the participants were given a chocolate bar as an incentive. The accelerometer data were processed and categorized using a Jupyter notebook script, which automates the workflow to ensure consistency in categorization. This script is part of our toolbox.</p>
      </sec>
      <sec>
        <title>Deep Learning Model</title>
        <p>We implemented a modified version of the DANA model proposed by Malekzadeh et al [<xref ref-type="bibr" rid="ref19">19</xref>], which involved removing and modifying several layers. This modification was made after testing the model (trained and tested on <italic>MotionSense</italic> data) and finding that the omission of these layers did not noticeably decrease the model’s performance.</p>
        <p>It is important to note that in our simplified model, we removed the DAP layer as our input data are dimensionally consistent at the time of testing. To validate the models, we trained them both on the <italic>MotionSense</italic> data set and our own data set, as well as testing both combinations.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>Through a systematic variation of the number of nodes and layers, we determined that the best balance between accuracy and complexity is achieved with the described architecture. This architecture was determined based on the accuracy of the models in classifying movement types of the <italic>MotionSense</italic> data set when trained on the same data set. Interestingly, when we trained on the <italic>MotionSense</italic> data set and tested on our own data, our model performed better than DANA, yet still with room for improvement, at 63% vs 26%.</p>
      <p>When trained on the same data set as the one they are tested on, both models performed well in classifying behavior. The DANA model achieved approximately 87% accuracy when trained and tested on the MotionSense data set and approximately 90% accuracy when trained and tested on our own data, depending on the sampling rate (<xref rid="figure5" ref-type="fig">Figure 5</xref>). However, when trained on the MotionSense data set and tested on our own data, the accuracy of DANA drops to around 26%, also depending on the dimensionality of the input, while our model performs at around 63% but is much less robust against variation in input dimensionality (<xref rid="figure6" ref-type="fig">Figure 6</xref>). This still leaves room for improvement but shows the comparatively high generalization ability of our model. It is important to note that neither the MotionSense data nor our own data include magnetometer data, which is why the DANA model performs poorly (at or near zero accuracy) when reduced to only magnetometer input. The graph includes this information for consistency.</p>
      <fig id="figure5" position="float">
        <label>Figure 5</label>
        <caption>
          <p>Accuracy in classifying using the dimension-adaptive neural architecture (DANA) model (A) trained and tested on MotionSense data; (B) our model trained and tested on our data; (C) DANA trained on MotionSense and tested on our data; and (D) our model trained on our own data and tested on MotionSense data. Note that the dimensionality is varied here to showcase the robustness, and our model is impacted more strongly by a varied dimensionality input. Acc: accelerometer; Gyr: gyroscope; Mag: magnetometer.</p>
        </caption>
        <graphic xlink:href="ai_v2i1e42337_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <fig id="figure6" position="float">
        <label>Figure 6</label>
        <caption>
            <p>Confusion matrices of accuracy in classifying (A) using our own simplified model trained on MotionSense data tested on MotionSense data; (B) trained on MotionSense data and tested on own data; (C) trained and tested on our own data; and (D) trained on our own data and tested on MotionSense data. Note that dimensionality is not varied here as all sensors are available. dws: downstairs; jog: jogging; sit: sitting; std: standing; ups: upstairs; wlk: walking.</p>
        </caption>
        <graphic xlink:href="ai_v2i1e42337_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>Our simplified model does not include the DAP layer and is less robust against input dimensional variance, as our input data dimensions did not vary. However, it is easily adaptable if desired. Despite this, our model outperforms the DANA model in terms of accuracy. When trained on the MotionSense data set and tested on it, our model achieved 95.4% accuracy. It was equally accurate when trained on our own data and tested on it, with 92.4% accuracy. However, when trained on the MotionSense data and tested on our own data, accuracy drops to 25.8%, but when trained on our data and tested on MotionSense, accuracy reached 63.4%.</p>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Conclusions</title>
        <p>Both models included in our toolbox perform well when trained and tested on the same data set. However, they do not perform well when trained on one data set and tested on the other, as was the case in our study. This highlights the unavoidable problem of overfitting to the collected data when improving algorithm performance, although this was controlled for as far as possible. Despite this, both models (DANA and our own) performed similarly when trained on one data set and tested on the other. Our model is slightly more accurate, but the DANA model is more robust with regard to dimensional variance in the input. However, there is a significant difference in computing time when training the models. The DANA model, when trained using Google Colab with CPU and GPU resources, took around 11 hours to train each time. On the other hand, our model can be trained in about 5 minutes with 100 epochs of training using only CPUs in Google Colab. Note that this estimation does not include hyperparameter testing.</p>
        <p>Given the amount of data used to train the models, the results are surprisingly accurate. Commercial wearables, such as sports-oriented smartwatches, often have a function to display the user’s current activity. However, these displayed activities are often incorrect, even for activities that seem obvious to the user. Considering these devices are widely available and sold to millions of people, we expected movement detection to be much more challenging, and our accuracy to be in the low 60% range.</p>
        <p>While the accuracy of movement classification is very good, there is still room for improvement, which we plan to achieve by training the algorithm on additional data from diverse populations or environments. We recommend using the DANA model to classify behavior in data that have been gathered at different dimensions or with variable input dimensions. However, if the input type is consistent, we recommend our model as it is slightly more accurate and much easier to train. Both algorithms are available at our GitHub repository, along with the <italic>HumanActivityRecorder</italic> app and the scripts to process the data. In a future step, we plan to integrate both algorithms into the app and evaluate their performance in a subsequent study.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>The orientation of the smartphone during recording has an impact on classification accuracy if the sample size is not large enough, as shown in our comparison of classification accuracy of groups 1 and 2 (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). However, if trained on large data sets with varying orientation, this effect disappears. For comparability, we based our model on the group with the same orientation as in the <italic>MotionSense</italic> data set. Accounting for orientation was outside the scope of our study. To address the impact of smartphone orientation on classification accuracy in medium-sized samples, an easy solution would be to incorporate an orientation recognition stage that detects the orientation of the smartphone and branches the data to models that have been individually trained on each orientation. This would ensure more accurate classification regardless of the smartphone orientation.</p>
      </sec>
      <sec>
        <title>Authenticity</title>
        <p>The results of the study are presented clearly, honestly, and without fabrication, falsification, or inappropriate data manipulation. The results of this study do not constitute endorsement by this Journal. This manuscript has not been published elsewhere, and it has not been submitted simultaneously for publication elsewhere.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Screenshots of the Android app. From left to right: start screen, sociodemographics, and recording screen.</p>
        <media xlink:href="ai_v2i1e42337_app1.png" xlink:title="PNG File , 151 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Accuracy of the classification of our model (A) trained and tested on group 1 data; (B) trained on group 1 data and tested on MotionSense data; (C) trained and tested on group 2 data; and (D) trained on group 2 data and tested on MotionSense data. Group 1 was instructed to wear the smartphone wherever they preferred individually. Group 2 was instructed to wear it screen inside, top facing downward in the right trouser pocket, in line with data collection for the MotionSense data set, to ensure maximum comparability.</p>
        <media xlink:href="ai_v2i1e42337_app2.png" xlink:title="PNG File , 139 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">DANA</term>
          <def>
            <p>dimension-adaptive neural architecture</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">DAP</term>
          <def>
            <p>dimension-adaptive pooling</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">DNN</term>
          <def>
            <p>deep neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">FNN</term>
          <def>
            <p>feedforward neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">RNN</term>
          <def>
            <p>recurrent neural network</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research received no specific grant from any funding agency in the public, commercial, or not-for-profit sectors.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>All data used are available [<xref ref-type="bibr" rid="ref28">28</xref>].</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>FW was the principal investigator, drafted the manuscript, and trained the algorithm; CN provided guidance for publishing.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Number of smartphone mobile network subscriptions worldwide from 2016 to 2022, with forecasts from 2023 to 2028</article-title>
          <source>Statista</source>
          <access-date>2023-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.statista.com/statistics/330695/number-of-smartphone-users-worldwide">http://www.statista.com/statistics/330695/number-of-smartphone-users-worldwide</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <article-title>Mobile Consumer Survey 2017: The UK cut</article-title>
          <source>Deloitte</source>
          <access-date>2023-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.deloitte.co.uk/mobileuk2017/">https://www.deloitte.co.uk/mobileuk2017/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tacconi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mellone</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chiari</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Smartphone-Based Applications for Investigating Falls and Mobility</article-title>
          <year>2011</year>
          <conf-name>Proceedings of the 5th International ICST Conference on Pervasive Computing Technologies for Healthcare</conf-name>
          <conf-date>May 23-26, 2011</conf-date>
          <conf-loc>Dublin, Republic of Ireland</conf-loc>
          <pub-id pub-id-type="doi">10.4108/icst.pervasivehealth.2011.246060</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mehta</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Zañartu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Cheyne</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Hillman</surname>
              <given-names>RE</given-names>
            </name>
          </person-group>
          <article-title>Mobile Voice Health Monitoring Using a Wearable Accelerometer Sensor and a Smartphone Platform</article-title>
          <source>IEEE Trans. Biomed. Eng</source>
          <year>2012</year>
          <month>11</month>
          <volume>59</volume>
          <issue>11</issue>
          <fpage>3090</fpage>
          <lpage>3096</lpage>
          <pub-id pub-id-type="doi">10.1109/tbme.2012.2207896</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garcia-Ceja</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Osmani</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Mayora</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Automatic Stress Detection in Working Environments From Smartphones’ Accelerometer Data: A First Step</article-title>
          <source>IEEE J. Biomed. Health Inform</source>
          <year>2016</year>
          <month>7</month>
          <volume>20</volume>
          <issue>4</issue>
          <fpage>1053</fpage>
          <lpage>1060</lpage>
          <pub-id pub-id-type="doi">10.1109/jbhi.2015.2446195</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fino</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Mazzetti</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Monitoring healthy and disturbed sleep through smartphone applications: a review of experimental evidence</article-title>
          <source>Sleep Breath</source>
          <year>2019</year>
          <month>03</month>
          <day>23</day>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>13</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.1007/s11325-018-1661-3</pub-id>
          <pub-id pub-id-type="medline">29687190</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11325-018-1661-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lau</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>David</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Movement recognition using the accelerometer in smartphones</article-title>
          <year>2010</year>
          <conf-name>2010 Future Network &#38; Mobile Summit</conf-name>
          <conf-date>June 16-18, 2010</conf-date>
          <conf-loc>Florence, Italy</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Activity Recognition Using Hierarchical Hidden Markov Models on a Smartphone with 3D Accelerometer</article-title>
          <year>2011</year>
          <conf-name>HAIS 2011: Hybrid Artificial Intelligent Systems</conf-name>
          <conf-date>September 22-24, 2011</conf-date>
          <conf-loc>Bilbao, Spain</conf-loc>
          <fpage>460</fpage>
          <lpage>67</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-642-21219-2_58</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wannenburg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Malekian</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Physical Activity Recognition From Smartphone Accelerometer Data for User Context Awareness Sensing</article-title>
          <source>IEEE Trans. Syst. Man Cybern, Syst</source>
          <year>2017</year>
          <month>12</month>
          <volume>47</volume>
          <issue>12</issue>
          <fpage>3142</fpage>
          <lpage>3149</lpage>
          <pub-id pub-id-type="doi">10.1109/tsmc.2016.2562509</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Case</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Burwick</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Volpp</surname>
              <given-names>KG</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Accuracy of smartphone applications and wearable devices for tracking physical activity data</article-title>
          <source>JAMA</source>
          <year>2015</year>
          <month>02</month>
          <day>10</day>
          <volume>313</volume>
          <issue>6</issue>
          <fpage>625</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2014.17841</pub-id>
          <pub-id pub-id-type="medline">25668268</pub-id>
          <pub-id pub-id-type="pii">2108876</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for sensor-based activity recognition: A survey</article-title>
          <source>Pattern Recognition Letters</source>
          <year>2019</year>
          <month>03</month>
          <volume>119</volume>
          <fpage>3</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1016/j.patrec.2018.02.010</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>San</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Krishnaswamy</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Deep Convolutional Neural Networks on Multichannel Time Series for Human Activity Recognition</article-title>
          <year>2015</year>
          <conf-name>Proceedings of the 24th International Conference on Artificial Intelligence</conf-name>
          <conf-date>July 25-31, 2015</conf-date>
          <conf-loc>Buenos Aires, Argentina</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ronao</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Human activity recognition with smartphone sensors using deep learning neural networks</article-title>
          <source>Expert Systems with Applications</source>
          <year>2016</year>
          <month>10</month>
          <volume>59</volume>
          <fpage>235</fpage>
          <lpage>244</lpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2016.04.032</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ignatov</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Real-time human activity recognition from accelerometer data using Convolutional Neural Networks</article-title>
          <source>Applied Soft Computing</source>
          <year>2018</year>
          <month>01</month>
          <volume>62</volume>
          <fpage>915</fpage>
          <lpage>922</lpage>
          <pub-id pub-id-type="doi">10.1016/j.asoc.2017.09.027</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ordóñez</surname>
              <given-names>FJ</given-names>
            </name>
            <name name-style="western">
              <surname>Roggen</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Deep Convolutional and LSTM Recurrent Neural Networks for Multimodal Wearable Activity Recognition</article-title>
          <source>Sensors (Basel)</source>
          <year>2016</year>
          <month>01</month>
          <day>18</day>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>115</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s16010115"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s16010115</pub-id>
          <pub-id pub-id-type="medline">26797612</pub-id>
          <pub-id pub-id-type="pii">s16010115</pub-id>
          <pub-id pub-id-type="pmcid">PMC4732148</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chevalier</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Deep Residual Bidir-LSTM for Human Activity Recognition Using Wearable Sensors</article-title>
          <source>Mathematical Problems in Engineering</source>
          <year>2018</year>
          <month>12</month>
          <day>30</day>
          <volume>2018</volume>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <pub-id pub-id-type="doi">10.1155/2018/7316954</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Abdelzaher</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>DeepSense: A Unified Deep Learning Framework for Time-Series Mobile Sensing Data Processing</article-title>
          <year>2017</year>
          <conf-name>Proceedings of the 26th International Conference on World Wide Web</conf-name>
          <conf-date>April 3-7, 2017</conf-date>
          <conf-loc>Perth, Australia</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3038912.3052577</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jeyakumar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Suda</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Srivastava</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>SenseHAR: a robust virtual activity sensor for smartphones and wearables</article-title>
          <year>2019</year>
          <conf-name>Proceedings of the 17th Conference on Embedded Networked Sensor Systems</conf-name>
          <conf-date>November 10-13, 2019</conf-date>
          <conf-loc>New York, USA</conf-loc>
          <fpage>15</fpage>
          <lpage>28</lpage>
          <pub-id pub-id-type="doi">10.1145/3356250.3360032</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Malekzadeh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Clegg</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Cavallaro</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haddadi</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Privacy and utility preserving sensor-data transformations</article-title>
          <source>Pervasive and Mobile Computing</source>
          <year>2020</year>
          <month>03</month>
          <volume>63</volume>
          <fpage>101132</fpage>
          <pub-id pub-id-type="doi">10.1016/j.pmcj.2020.101132</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schmidhuber</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in neural networks: an overview</article-title>
          <source>Neural Netw</source>
          <year>2015</year>
          <month>01</month>
          <volume>61</volume>
          <fpage>85</fpage>
          <lpage>117</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neunet.2014.09.003</pub-id>
          <pub-id pub-id-type="medline">25462637</pub-id>
          <pub-id pub-id-type="pii">S0893-6080(14)00213-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodfellow</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Courville</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Deep Learning</source>
          <year>2016</year>
          <publisher-loc>Cambridge, Massachusetts, USA</publisher-loc>
          <publisher-name>MIT press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Courville</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vincent</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Representation learning: a review and new perspectives</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>2013</year>
          <month>08</month>
          <volume>35</volume>
          <issue>8</issue>
          <fpage>1798</fpage>
          <lpage>828</lpage>
          <pub-id pub-id-type="doi">10.1109/TPAMI.2013.50</pub-id>
          <pub-id pub-id-type="medline">23787338</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Gill</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Missing value imputation for physical activity data measured by accelerometer</article-title>
          <source>Stat Methods Med Res</source>
          <year>2018</year>
          <month>02</month>
          <day>17</day>
          <volume>27</volume>
          <issue>2</issue>
          <fpage>490</fpage>
          <lpage>506</lpage>
          <pub-id pub-id-type="doi">10.1177/0962280216633248</pub-id>
          <pub-id pub-id-type="medline">26994215</pub-id>
          <pub-id pub-id-type="pii">0962280216633248</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Network In Network</article-title>
          <source>arXiv</source>
          <year>2014</year>
          <fpage>1</fpage>
          <lpage>10</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1312.4400"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1312.4400</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Islam</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Nooruddin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Karray</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Muhammad</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Human activity recognition using tools of convolutional neural networks: A state of the art review, data sets, challenges, and future prospects</article-title>
          <source>Comput Biol Med</source>
          <year>2022</year>
          <month>10</month>
          <volume>149</volume>
          <fpage>106060</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106060</pub-id>
          <pub-id pub-id-type="medline">36084382</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(22)00773-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Malekzadeh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Clegg</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cavallaro</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Haddadi</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>DANA: Dimension-Adaptable Neural Architecture for Multivariate Sensor Data</article-title>
          <source>Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.</source>
          <year>2021</year>
          <month>09</month>
          <day>14</day>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>1</fpage>
          <lpage>27</lpage>
          <pub-id pub-id-type="doi">10.1145/3478074</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <article-title>MotionSense dataset</article-title>
          <source>GitHub</source>
          <access-date>2023-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/mmalekzadeh/motion-sense">https://github.com/mmalekzadeh/motion-sense</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <article-title>HumanActivityRecorder</article-title>
          <source>GitHub</source>
          <access-date>2023-05-19</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/FluWieland/HumanActivityRecorder">https://github.com/FluWieland/HumanActivityRecorder</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
