<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v5i1e84984</article-id><article-id pub-id-type="doi">10.2196/84984</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Deep Learning for Age Estimation and Sex Prediction Using Mandibular-Cropped Cephalometric Images: Comparative Model Development and Validation Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Handayani</surname><given-names>Vitria Wuri</given-names></name><degrees>DMD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Margaretha Amiatun Ruth</surname><given-names>Mieke Sylvia</given-names></name><degrees>DDS, PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Rulaningtyas</surname><given-names>Riries</given-names></name><degrees>PhD</degrees><xref 
ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Kurniawan</surname><given-names>Arofi</given-names></name><degrees>DMD, PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff5">5</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Yudhantorro</surname><given-names>Bayu Azra</given-names></name><degrees>M Kom</degrees><xref ref-type="aff" rid="aff6">6</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Yudianto</surname><given-names>Ahmad</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff5">5</xref><xref ref-type="aff" rid="aff7">7</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Faculty of Medicine, Universitas Airlangga</institution><addr-line>Surabaya</addr-line><country>Indonesia</country></aff><aff id="aff2"><institution>Nursing Department, Poltekkes Kemenkes Pontianak</institution><addr-line>Pontianak</addr-line><country>Indonesia</country></aff><aff id="aff3"><institution>Division of Forensic Odontology, Faculty of Dental Medicine, Universitas Airlangga</institution><addr-line>Surabaya</addr-line><country>Indonesia</country></aff><aff id="aff4"><institution>Forensics and Medicolegal Department, Faculty of Medicine, Universitas Airlangga</institution><addr-line>Surabaya</addr-line><addr-line>East Java</addr-line><country>Indonesia</country></aff><aff id="aff5"><institution>Postgraduate School, Universitas Airlangga</institution><addr-line>Surabaya</addr-line><country>Indonesia</country></aff><aff id="aff6"><institution>Department of Information Systems, Institut Sepuluh 
Nopember</institution><addr-line>Surabaya</addr-line><country>Indonesia</country></aff><aff id="aff7"><institution>Forensics and Medicolegal Department, Faculty of Medicine, Universitas Airlangga</institution><addr-line>Surabaya, East Java 60132, Indonesia</addr-line><addr-line>Surabaya</addr-line><country>Indonesia</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Huo</surname><given-names>Yuankai</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Aryanto</surname><given-names>Anang</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Santos</surname><given-names>Rui</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Ahmad Yudianto, MD, PhD, Forensics and Medicolegal Department, Faculty of Medicine, Universitas Airlangga, Surabaya, East Java 60132, Indonesia, 62 81330198281; <email>ahmad-yudianto@fk.unair.ac.id</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>18</day><month>3</month><year>2026</year></pub-date><volume>5</volume><elocation-id>e84984</elocation-id><history><date date-type="received"><day>29</day><month>09</month><year>2025</year></date><date date-type="accepted"><day>19</day><month>12</month><year>2025</year></date></history><copyright-statement>&#x00A9; Vitria Wuri Handayani, Mieke Sylvia Margaretha Amiatun Ruth, Riries Rulaningtyas, Arofi Kurniawan, Bayu Azra Yudhantorro, Ahmad Yudianto. Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 18.3.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2026/1/e84984"/><abstract><sec><title>Background</title><p>Mandibular structures offer resilient features for forensic identification where partial remains are available in postmortem condition. Deep learning applied to cephalometric radiographs offers an opportunity to predict demographic attributes, such as age and sex, which are critical in forensic and clinical contexts.</p></sec><sec><title>Objective</title><p>This study aimed to develop and evaluate a multitask deep learning framework for age estimation and sex prediction from cropped mandibular regions of cephalometric radiographs, comparing multiple convolutional neural network backbones and preprocessing scenarios to address class imbalance.</p></sec><sec sec-type="methods"><title>Methods</title><p>A total of 340 anonymized cephalometric radiographs from Indonesian individuals aged 8 to 40 years were collected and manually cropped into 2 mandibular regions of interest: mandibular length and mandibular angle, producing 680 validated samples. 
Images were resized to 224&#x00D7;224 pixels and processed under 4 preprocessing scenarios: original, Synthetic Minority Oversampling Technique, StandardScaler, and Synthetic Minority Oversampling Technique+StandardScaler. Six pretrained convolutional neural network backbones (MobileNetV2, ResNet50V2, InceptionV3, InceptionResNetV2, VGG16, and VGG19) were fine-tuned within a multitask framework. Performance was evaluated using mean absolute error and mean absolute percentage error for age estimation and accuracy and <italic>F</italic><sub>1</sub>-score for sex prediction.</p></sec><sec sec-type="results"><title>Results</title><p>VGG16 achieved the best performance for age estimation, with the lowest mean absolute error of 3.19 years and mean absolute percentage error of 13.19% in the original dataset. For sex prediction, VGG16 achieved the highest accuracy (86%) and balanced <italic>F</italic><sub>1</sub>-scores (female: 92%; male: 63%) under the StandardScaler condition, followed by VGG19 (accuracy=82%).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Combining mandibular cropping with deep learning and balanced preprocessing scenarios enhances demographic prediction in cephalometric radiographs. The findings emphasize the potential use of artificial intelligence&#x2013;assisted forensic odontology to support disaster victim identification when partial remains are available.</p></sec></abstract><kwd-group><kwd>artificial intelligence in medical imaging</kwd><kwd>age estimation</kwd><kwd>cephalometric radiograph</kwd><kwd>preprocessing deep learning</kwd><kwd>sex prediction</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Forensic investigators rely on age and sex as key identifiers in biological profiling [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. 
Accurate age estimation and sex prediction are fundamental not only for forensic investigations but also for disaster victim identification, archeological research, and clinical applications [<xref ref-type="bibr" rid="ref4">4</xref>]. These parameters should provide the baseline for reconstructing biological profiles and ensuring reliable identification in various contexts such as when a mandible is found [<xref ref-type="bibr" rid="ref5">5</xref>]. The mandible, as one of the strongest and most resilient bones in the human body, retains essential anatomical markers and plays a crucial role in disaster victim identification [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. Different anatomical mandible features, such as the structure of the dental arcade, the jaw angle, and the presence or absence of the teeth, can yield important data on an individual&#x2019;s age, sex, ancestry, and personal identity [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. Investigators apply these features to aid biological profiling and establish the identity of deceased individuals [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>Conventional approaches, including morphometric analysis and manual radiographic evaluation, depend strongly on the judgment of observers and often produce inconsistent outcomes [<xref ref-type="bibr" rid="ref15">15</xref>]. This highlights the importance of developing objective, standardized, and reproducible methods that can minimize subjectivity and improve diagnostic consistency. Advances in artificial intelligence (AI), particularly deep learning networks, now automate medical image analysis and improve diagnostic efficiency and reproducibility [<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. 
Neural network models further expand new opportunities by automating and enhancing the precision of sex prediction and human identification based on mandibular characteristics [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Deep learning models such as convolutional neural networks (CNNs), with their hierarchical feature extraction mechanisms, excel in pattern recognition tasks involving medical imaging and demonstrate strong potential for predicting demographic traits including age and sex.</p><p>A previous study evaluated mandibular parameters using digital orthopantomography in the Indian population and reported that bigonial width was most effective for age estimation, while the antegonial angle, a mandibular angle parameter, was the most reliable for sex determination [<xref ref-type="bibr" rid="ref14">14</xref>]. In our preliminary study, we used artificial neural networks for sex prediction using mandibular parameters in the Indonesian population and found that 2 parameters (mandibular length and mandibular angle) were the most influential, although performance varied due to dataset imbalance and preprocessing techniques [<xref ref-type="bibr" rid="ref16">16</xref>].</p><p>Building on this insight, we evaluated whether cropping cephalometric images to focus on mandibular angle and mandibular length could enhance prediction accuracy. We conducted a comparative study under 4 preprocessing scenarios (the original dataset, the Synthetic Minority Oversampling Technique [SMOTE], StandardScaler normalization, and SMOTE+StandardScaler), using 6 pretrained deep learning models (MobileNetV2, ResNet50V2, InceptionV3, InceptionResNetV2, VGG16, and VGG19). 
This study aimed to refine AI-based demographic prediction pipelines for cephalometric imaging by addressing dataset imbalance and limitations of conventional methods, an approach that has remained minimally explored in forensic odontology.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>The study workflow can be seen in <xref ref-type="fig" rid="figure1">Figure 1</xref>. This study used a deep learning pipeline organized into 3 sequential stages.</p><p>First, image preprocessing was performed. In this stage, cephalometric radiographs were cropped into mandibular regions&#x2014;the mandibular angle and mandibular length. Four preprocessing strategies were then applied (original, SMOTE, StandardScaler, and SMOTE+StandardScaler), combined with image augmentation.</p><p>Second, model development was conducted. Six CNN architectures (MobileNetV2, ResNet50V2, InceptionV3, InceptionResNetV2, VGG16, and VGG19) were adapted through transfer learning. Each mandibular region was processed separately, features were flattened, and the outputs merged into a single representation.</p><p>Third, multitask prediction was performed. The integrated features were used to generate 2 outputs (age estimation and sex prediction).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Workflow of the deep learning&#x2013;based age and sex prediction model using cropped cephalometric radiographs. 
(A) Input full cephalometric images; (B) cropped mandibular length images; (C) cropped mandibular angle images; (D) multistream deep learning framework for joint sex prediction and age estimation.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e84984_fig01.png"/></fig></sec><sec id="s2-2"><title>Dataset</title><p>The data used in this study were obtained from the Department of Radiology at the Universitas Airlangga Dental and Mouth Hospital between 2019 and February 2023. A total of 340 anonymized cephalometric radiographs were collected from Indonesian individuals aged 8 to 40 years (<xref ref-type="fig" rid="figure1">Figure 1A</xref>). All images were standardized to a resolution of 224&#x00D7;224 pixels before analysis. Each radiograph was manually cropped into 2 regions of interest&#x2014;the mandibular length (<xref ref-type="fig" rid="figure1">Figure 1B</xref>) and the mandibular angle (<xref ref-type="fig" rid="figure1">Figure 1C</xref>)&#x2014;by the research team and subsequently validated by licensed dentists with a minimum of 5 years of clinical experience. This procedure produced 680 image samples.</p></sec><sec id="s2-3"><title>Preprocessing Strategies</title><p>To examine the impact of balancing and normalization, 4 distinct scenarios were tested (<xref ref-type="table" rid="table1">Table 1</xref>):</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Description and purpose of the preprocessing scenario. 
Each scenario was implemented independently under identical conditions to allow fair comparison.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Scenario</td><td align="left" valign="bottom">Description</td><td align="left" valign="bottom">Purpose</td></tr></thead><tbody><tr><td align="left" valign="top">Original</td><td align="left" valign="top">Raw cropped images without balancing or normalization</td><td align="left" valign="top">Baseline comparison</td></tr><tr><td align="left" valign="top">SMOTE<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">SMOTE applied to sex classes</td><td align="left" valign="top">Address dataset imbalance</td></tr><tr><td align="left" valign="top">StandardScaler</td><td align="left" valign="top">Pixel intensity standardized to zero mean and unit variance</td><td align="left" valign="top">Normalize intensity distribution</td></tr><tr><td align="left" valign="top">SMOTE+StandardScaler</td><td align="left" valign="top">Combination of SMOTE and StandardScaler</td><td align="left" valign="top">Assess combined effect</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>SMOTE: Synthetic Minority Oversampling Technique.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-4"><title>Image Augmentation</title><p>Image augmentation was applied to the training set using the Keras ImageDataGenerator to mitigate the small number of datasets. These augmentation techniques synthetically increased dataset diversity and helped the deep learning models learn more invariant features from the mandibular anatomy. The augmentation configuration included random rotation range up to 90 degrees, zooming in or out up to 25%, random horizontal flips and vertical flips up to 25%, and nearest neighbor interpolation for missing pixels. 
Image augmentation was applied only to the training set and implemented uniformly across all experiments at the image level, using identical configurations for both age estimation and sex prediction tasks. The purpose of augmentation was to increase data variability rather than to achieve exact numerical class balancing.</p></sec><sec id="s2-5"><title>CNN Architectures</title><p>Six CNNs were selected for their proven utility in medical imaging and differing levels of complexity:</p><list list-type="order"><list-item><p>MobileNetV2 (lightweight and efficient, suitable for limited datasets)</p></list-item><list-item><p>ResNet50V2 (its residual connections reduce vanishing gradient problems)</p></list-item><list-item><p>InceptionV3</p></list-item><list-item><p>InceptionResNetV2 (these capture multiscale contextual features)</p></list-item><list-item><p>VGG16</p></list-item><list-item><p>VGG19 (these are classical deep CNNs serving as baselines)</p></list-item></list><p>All models were initialized with ImageNet weights and fine-tuned. Features from both mandibular regions were extracted, flattened, concatenated, and processed through a multitask output structure.</p></sec><sec id="s2-6"><title>Training Procedure</title><p>Images were resized to 224&#x00D7;224 pixels and the pixel values normalized to the [0,1] range and divided into training (70%), validation (15%), and testing (15%) subsets, ensuring no participant overlap between sets. We used TensorFlow and the Adam optimizer with a learning rate of 1&#x00D7;10<sup>&#x2013;4</sup>. Huber loss was applied to the age estimation output, while binary cross-entropy with label smoothing (0.05) was used for the sex prediction output. We did not use class weighting in the loss function, but we relied on SMOTE to mitigate imbalance together with label smoothing. 
To prevent overfitting, we applied regularization techniques such as dropout (0.5), early stopping with a patience of 10, and learning rate reduction on a plateau. Training was conducted for up to 100 epochs with a batch size of 32, with early stopping based on validation performance. Given the relatively small dataset, model training was closely monitored using validation performance to further mitigate overfitting. A unified hyperparameter configuration was applied across all architectures to ensure a controlled and fair comparative evaluation.</p></sec><sec id="s2-7"><title>Evaluation</title><p>We examined the CNN&#x2019;s performance on each task separately. For sex prediction, the metrics included accuracy, precision, and <italic>F</italic><sub>1</sub>-score, while age estimation was assessed using mean absolute error (MAE) in years and mean absolute percentage error (MAPE) in percent. All evaluations were conducted on the held-out test set across every preprocessing scenario and CNN architecture to ensure consistency and comparability of results.</p></sec><sec id="s2-8"><title>Ethical Considerations</title><p>This research used an archived dataset of cephalometric radiographs sourced from the Department of Radiology at Universitas Airlangga Dental and Mouth Hospital from March 2019 to February 2023, and the requirement for informed consent was waived by the institutional review board. No intervention or direct contact with participants occurred. The dataset remains inaccessible to the public owing to institutional data-sharing policies and considerations regarding patient privacy. All methods adhered to applicable guidelines and regulations, including the Declaration of Helsinki and institutional ethical standards. The Dental Faculty of Universitas Airlangga approved the experimental protocols (316/HERCC.FODM/III/2023). 
We anonymized patient records before conducting the analysis to protect confidentiality and uphold ethical guidelines.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Dataset Distribution and Preprocessing</title><p>The dataset comprised 340 cephalometric radiographs collected from Indonesian individuals aged 8 to 40 years. Each image was manually cropped to isolate 2 mandibular regions&#x2014;the mandibular length (<xref ref-type="fig" rid="figure1">Figure 1B</xref>) and the mandibular angle (<xref ref-type="fig" rid="figure1">Figure 1C</xref>), producing a total of 680 image inputs. <xref ref-type="table" rid="table2">Table 2</xref> summarizes the distribution of image samples by sex and age group, showing a 3:1 female-to-male ratio and a strong concentration of samples in the age range of 16 to 25 years. Age-group frequencies are reported at the image level (2 cropped mandibular images per individual).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Cephalometric sample distribution data by sex and age group (N=680).</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top" colspan="2">Variable</td><td align="left" valign="top">Participants, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Sex</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Female</td><td align="left" valign="top">510 (75.0)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Male</td><td align="left" valign="top">170 (25.0)</td></tr><tr><td align="left" valign="top" colspan="3">Age group (y)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">11&#x2010;15</td><td align="left" valign="top">16 (2.4)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">16&#x2010;20</td><td align="left" valign="top">232 (34.1)</td></tr><tr><td align="left" valign="top"/><td align="left" 
valign="top">21&#x2010;25</td><td align="left" valign="top">282 (41.5)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">26&#x2010;30</td><td align="left" valign="top">76 (11.2)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">31&#x2010;35</td><td align="left" valign="top">56 (8.2)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">36&#x2010;40</td><td align="left" valign="top">18 (2.6)</td></tr></tbody></table></table-wrap></sec><sec id="s3-2"><title>Model Architecture Overview</title><p>Six pretrained CNN architectures (MobileNetV2, ResNet50V2, InceptionV3, InceptionResNetV2, VGG16, and VGG19) were assessed under 4 preprocessing scenarios: original, SMOTE, StandardScaler, and SMOTE+StandardScaler. The dual-input framework combined mandibular length and angle features for joint prediction of sex and age.</p></sec><sec id="s3-3"><title>Age Estimation Performance</title><p>Model performance in age estimation was evaluated using MAE and MAPE. 
<xref ref-type="table" rid="table3">Table 3</xref> presents the results across all architectures and preprocessing strategies.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Test set result for age estimation<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup>.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Scenario and pretrained convolutional neural network architectures</td><td align="left" valign="bottom">Mean absolute error<break/>(years)</td><td align="left" valign="bottom">Mean absolute percentage error<break/>(%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Original</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">4.26</td><td align="left" valign="top">16.72</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">4.28</td><td align="left" valign="top">17.27</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">4.50</td><td align="left" valign="top">17.73</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">4.11</td><td align="left" valign="top">17.94</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></td><td align="left" valign="top"><italic>3.19</italic></td><td align="left" valign="top"><italic>13.19<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></italic></td></tr><tr><td align="left" 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">3.80</td><td align="left" valign="top">15.80</td></tr><tr><td align="left" valign="top" colspan="3">SMOTE<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">4.15</td><td align="left" valign="top">16.85</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">3.40</td><td align="left" valign="top">16.95</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">4.67</td><td align="left" valign="top">19.84</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">4.84</td><td align="left" valign="top">19.05</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16</td><td align="left" valign="top">4.32</td><td align="left" valign="top">16.69</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">3.98</td><td align="left" valign="top">16.03</td></tr><tr><td align="left" valign="top" colspan="3">StandardScaler</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">4.33</td><td align="left" valign="top">16.90</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">4.76</td><td align="left" valign="top">18.81</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">4.59</td><td align="left" valign="top">17.80</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">3.92</td><td align="left" valign="top">15.23</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16</td><td align="left" valign="top">3.44</td><td align="left" valign="top">14.90</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">3.57</td><td align="left" valign="top">14.35</td></tr><tr><td align="left" valign="top" colspan="3">SMOTE+StandardScaler</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">4.27</td><td align="left" valign="top">16.86</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">4.74</td><td align="left" valign="top">18.83</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">4.09</td><td align="left" valign="top">17.21</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">3.58</td><td align="left" 
valign="top">14.85</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16</td><td align="left" valign="top">3.48</td><td align="left" valign="top">14.60</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">3.72</td><td align="left" valign="top">15.31</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>Italics denote the best performance.</p></fn><fn id="table3fn2"><p><sup>b</sup>SMOTE: Synthetic Minority Oversampling Technique.</p></fn></table-wrap-foot></table-wrap><p>VGG16 achieved the lowest error rates, particularly in the original scenario (MAE=3.19 years; MAPE=13.19%), establishing a strong baseline. VGG19 also demonstrated robust performance across most scenarios. In contrast, InceptionV3 and InceptionResNetV2 consistently achieved higher errors, particularly under the SMOTE scenario, suggesting that synthetic oversampling might introduce noise detrimental to these architectures for regression. The StandardScaler and SMOTE+StandardScaler scenarios generally improved stability, reducing the performance gap between models and helping VGG19 achieve its best MAPE (14.35%). 
These results for MAE variation across models and preprocessing strategies are visualized in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Age regression error (mean absolute error) across deep learning models and scenarios.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e84984_fig02.png"/></fig></sec><sec id="s3-4"><title>Sex Prediction Performance</title><p>Model performance on sex prediction was evaluated using accuracy, macro <italic>F</italic><sub>1</sub>-score, weighted <italic>F</italic><sub>1</sub>-score, and class-wise <italic>F</italic><sub>1</sub>-scores for female and male categories. <xref ref-type="table" rid="table4">Table 4</xref> summarizes results across all architectures and preprocessing scenarios.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Sex prediction performance across scenarios and models<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup>.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Scenario and pretrained convolutional neural network architectures</td><td align="left" valign="bottom">Accuracy<break/>(%)</td><td align="left" valign="bottom">Macro <italic>F</italic><sub>1</sub>-score (%)</td><td align="left" valign="bottom">Weighted <italic>F</italic><sub>1</sub>-score (%)</td><td align="left" valign="bottom">Female <italic>F</italic><sub>1</sub>-score (%)</td><td align="left" valign="bottom">Male <italic>F</italic><sub>1</sub>-score (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="6">Original</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">78</td><td align="left" valign="top">57</td><td align="left" valign="top">72</td><td align="left" 
valign="top">87</td><td align="left" valign="top">27</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">76</td><td align="left" valign="top">50</td><td align="left" valign="top">68</td><td align="left" valign="top">86</td><td align="left" valign="top">14</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">76</td><td align="left" valign="top">50</td><td align="left" valign="top">68</td><td align="left" valign="top">86</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">76</td><td align="left" valign="top">60</td><td align="left" valign="top">72</td><td align="left" valign="top">86</td><td align="left" valign="top">33</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16</td><td align="left" valign="top">84</td><td align="left" valign="top">73</td><td align="left" valign="top">82</td><td align="left" valign="top">90</td><td align="left" valign="top">56</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">82</td><td align="left" valign="top">73</td><td align="left" valign="top">81</td><td align="left" valign="top">89</td><td align="left" valign="top">57</td></tr><tr><td align="left" valign="top" colspan="6">SMOTE<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">82</td><td align="left" 
valign="top">73</td><td align="left" valign="top">81</td><td align="left" valign="top">89</td><td align="left" valign="top">57</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">80</td><td align="left" valign="top">75</td><td align="left" valign="top">81</td><td align="left" valign="top">86</td><td align="left" valign="top">64</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">75</td><td align="left" valign="top">69</td><td align="left" valign="top">75</td><td align="left" valign="top">82</td><td align="left" valign="top">55</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">78</td><td align="left" valign="top">71</td><td align="left" valign="top">78</td><td align="left" valign="top">86</td><td align="left" valign="top">56</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16</td><td align="left" valign="top">82</td><td align="left" valign="top">71</td><td align="left" valign="top">80</td><td align="left" valign="top">89</td><td align="left" valign="top">53</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">84</td><td align="left" valign="top">75</td><td align="left" valign="top">83</td><td align="left" valign="top">90</td><td align="left" valign="top">60</td></tr><tr><td align="left" valign="top" colspan="6">StandardScaler</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" valign="top">82</td><td 
align="left" valign="top">68</td><td align="left" valign="top">79</td><td align="left" valign="top">89</td><td align="left" valign="top">47</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">76</td><td align="left" valign="top">50</td><td align="left" valign="top">68</td><td align="left" valign="top">86</td><td align="left" valign="top">14</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">76</td><td align="left" valign="top">50</td><td align="left" valign="top">68</td><td align="left" valign="top">86</td><td align="left" valign="top">14</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">80</td><td align="left" valign="top">63</td><td align="left" valign="top">75</td><td align="left" valign="top">88</td><td align="left" valign="top">38</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16</td><td align="left" valign="top">86</td><td align="left" valign="top">77</td><td align="left" valign="top">84</td><td align="left" valign="top">92</td><td align="left" valign="top">63</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">82</td><td align="left" valign="top">68</td><td align="left" valign="top">79</td><td align="left" valign="top">89</td><td align="left" valign="top">47</td></tr><tr><td align="left" valign="top" colspan="6">SMOTE + Standard Scaler</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>MobileNetV2</td><td align="left" 
valign="top">80</td><td align="left" valign="top">74</td><td align="left" valign="top">80</td><td align="left" valign="top">87</td><td align="left" valign="top">62</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ResNet50V2</td><td align="left" valign="top">73</td><td align="left" valign="top">53</td><td align="left" valign="top">68</td><td align="left" valign="top">83</td><td align="left" valign="top">33</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionV3</td><td align="left" valign="top">84</td><td align="left" valign="top">73</td><td align="left" valign="top">82</td><td align="left" valign="top">90</td><td align="left" valign="top">56</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>InceptionResNetV2</td><td align="left" valign="top">73</td><td align="left" valign="top">71</td><td align="left" valign="top">74</td><td align="left" valign="top">77</td><td align="left" valign="top">65</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG16<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top"><italic>80</italic></td><td align="left" valign="top"><italic>63</italic></td><td align="left" valign="top"><italic>75</italic></td><td align="left" valign="top"><italic>88</italic></td><td align="left" valign="top"><italic>38</italic></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>VGG19</td><td align="left" valign="top">82</td><td align="left" valign="top">73</td><td align="left" valign="top">81</td><td align="left" valign="top">89</td><td align="left" valign="top">57</td></tr></tbody></table><table-wrap-foot><fn 
id="table4fn1"><p><sup>a</sup>SMOTE: Synthetic Minority Oversampling Technique.</p></fn><fn id="table4fn2"><p><sup>b</sup>Italics denote the best performance.</p></fn></table-wrap-foot></table-wrap><p><xref ref-type="table" rid="table4">Table 4</xref> shows VGG16 and VGG19 delivered the most accurate and balanced performance. VGG16 achieved the highest stand-alone accuracy of 86% under the StandardScaler scenario, with a male <italic>F</italic><sub>1</sub>-score of 63%. The application of SMOTE consistently improved the male <italic>F</italic><sub>1</sub>-score across nearly all models, for instance, raising ResNet50V2&#x2019;s male <italic>F</italic><sub>1</sub>-score from 14% to 64%, confirming its efficacy in mitigating class imbalance. However, models such as InceptionV3 and ResNet50V2 exhibited high sensitivity to preprocessing, with performance fluctuating significantly across scenarios. Across the evaluated preprocessing scenarios, all CNN architectures demonstrated a male <italic>F</italic><sub>1</sub>-score above 33% in at least 1 scenario, except for InceptionV3 under the original (unbalanced) condition. The comparative accuracy trends are shown in <xref ref-type="fig" rid="figure3">Figure 3</xref>.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Sex prediction accuracy across deep learning models and scenarios. CNN: convolutional neural network; SMOTE: Synthetic Minority Oversampling Technique.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e84984_fig03.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Summary of Key Findings</title><p>This study demonstrates that deep learning models, particularly VGG16 and VGG19, can effectively perform joint age estimation and sex prediction from cropped mandibular cephalometric images. 
The findings emphasize that performance is not solely determined by architectural design but critically depends on preprocessing strategies to address dataset limitations. Synthetic oversampling (ie, SMOTE) was essential in mitigating severe class imbalance and improving fairness in male sex prediction, while the use of accuracy and <italic>F</italic><sub>1</sub>-score provided complementary insights into classifier behavior under imbalance [<xref ref-type="bibr" rid="ref19">19</xref>]. Accuracy offers a straightforward measure of correctness, yet can be misleading when 1 class dominates, whereas the <italic>F</italic><sub>1</sub>-score, by accounting for both precision and recall, provides a more reliable evaluation in such contexts [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. These results emphasize the importance of integrating robust architectures with targeted preprocessing to achieve equitable and reproducible outcomes in forensic odontology.</p></sec><sec id="s4-2"><title>The Strategic Role of Mandibular Cropping in Forensic Imaging</title><p>The preprocessing step of cropping cephalometric images to the mandibular region provides substantial advantages. For example, by removing irrelevant anatomical structures such as the cranial vault and maxilla, background noise is reduced, and the models are directed to focus on the most discriminative features. This cropping approach improves computational efficiency by lowering input dimensionality and enhances interpretability, as the mandible is widely recognized as one of the most sexually dimorphic bones in the craniofacial complex.</p><p>Several studies support the relevance of mandibular-focused analysis. A study by Prabha et al [<xref ref-type="bibr" rid="ref22">22</xref>] demonstrated that mandibular indices derived from lateral cephalograms are highly effective for sex determination, highlighting the forensic importance of isolating mandibular features. 
Similarly, Gao and Tang [<xref ref-type="bibr" rid="ref23">23</xref>] showed that deep learning frameworks integrating cephalometric landmarks achieve greater accuracy when attention mechanisms prioritize mandibular regions, as these structures carry distinctive morphological cues critical for demographic prediction. In forensic odontology, the mandible is also considered more resistant to postmortem changes compared with other cranial structures, making it a reliable target for identification.</p><p>Cropping images enables more precise landmark detection and reduces interobserver variability from a computational perspective [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. Preprocessing strategies such as region-specific cropping have been shown to improve model precision and generalization, particularly when combined with data-balancing techniques such as SMOTE. Clinically, mandibular length and angle are key determinants in orthodontic diagnosis and maxillofacial treatment planning, reinforcing the dual relevance of cropping for both forensic and medical applications.</p></sec><sec id="s4-3"><title>Age Estimation Performance</title><p>The performance analysis of age estimation highlights the importance of selecting appropriate preprocessing strategies to optimize deep learning models in demographic prediction. While the study was not designed to forecast age within defined intervals, the use of MAE and MAPE as evaluation metrics provides a robust framework for assessing predictive accuracy. MAE serves as a metric used by various recommendation systems to measure the difference between user ratings and predicted scores [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. 
However, a widely recognized accuracy metric across various fields, often referenced in scholarly articles, is the MAPE [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>].</p><p>The comparative outcomes across different preprocessing scenarios suggest that data balancing and normalization exert distinct influences on model behavior. Oversampling techniques such as SMOTE may introduce synthetic variability that benefits certain architectures but disrupts others, reflecting findings in prior work where oversampling occasionally degraded model generalization in medical imaging tasks [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. Conversely, normalization through StandardScaler consistently improved model generalization, aligning with evidence that standardized input distributions enhance convergence and stability in CNNs [<xref ref-type="bibr" rid="ref32">32</xref>].</p><p>This study is consistent with prior work using VGG16 for age estimation from cervical vertebrae images, which reported an MAE of 3.53 years and an average MAPE of 16.36% in the original (unbalanced) scenario [<xref ref-type="bibr" rid="ref33">33</xref>]. These results indicate that, on average, the predicted age deviated by approximately 3.5 years from the true chronological age, supporting the reliability of deep learning&#x2013;based age estimation in craniofacial imaging [<xref ref-type="bibr" rid="ref33">33</xref>].</p><p>In addition to preprocessing effects, the age distribution of the dataset was uneven, with a strong concentration of samples in the range of 16 to 25 years. This imbalance may have influenced age estimation performance, as models tend to achieve lower prediction errors in age groups that are more frequently represented during training, while performance for underrepresented age ranges may be less stable. 
Similar effects of age imbalance on regression-based age prediction tasks have been reported in prior studies, highlighting the importance of age-stratified sampling or regression-aware balancing strategies in future work [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>].</p></sec><sec id="s4-4"><title>Sex Prediction Performance</title><p>The performance analysis of sex prediction highlights the persistent challenge of class imbalance, particularly in male prediction, despite strong overall accuracies achieved by deep learning architectures. This imbalance is consistent with prior literature, where female features are often more consistently represented in datasets, leading to biased learning outcomes. Franco et al [<xref ref-type="bibr" rid="ref36">36</xref>] demonstrated that transfer learning approaches outperform models trained from scratch in dental radiograph classification, underscoring the importance of pretrained architectures, such as VGG16 and VGG19, in capturing subtle morphological differences. These findings emphasize that while high accuracy is achievable, equitable performance across sexes remains a methodological priority in forensic odontology.</p><p>Oversampling techniques such as SMOTE proved effective in mitigating imbalance by generating synthetic samples for minority classes. Elreedy et al [<xref ref-type="bibr" rid="ref37">37</xref>] provided a comprehensive analysis of SMOTE, confirming its utility in addressing class imbalance across diverse domains [<xref ref-type="bibr" rid="ref38">38</xref>]. More recent refinements, including abnormal minority handling and Outlier-SMOTE, demonstrate that oversampling can be adapted to improve generalization in sensitive datasets [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. 
In forensic sex prediction, these approaches are particularly relevant, as they provide more representative training views and reduce bias in male prediction. Furthermore, advanced variants such as MeanRadius-SMOTE have shown superior reliability compared with conventional SMOTE and LR-SMOTE, achieving better predictive accuracy across both majority and minority classes [<xref ref-type="bibr" rid="ref41">41</xref>]. Collectively, these studies reinforce that oversampling is a critical intervention, though its effectiveness remains architecture-dependent.</p><p>Normalization techniques also contributed to improved accuracy, particularly in complex architectures, by ensuring equal feature contributions and reducing the risk of dominant variables overshadowing relevant patterns. Practical guidelines such as those outlined by Brownlee [<xref ref-type="bibr" rid="ref41">41</xref>] highlight the role of StandardScaler and Normalizer in stabilizing training, while empirical studies confirm their impact on supervised classification accuracy [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. The broader generalizability of normalization has been demonstrated by significant performance gains in electricity consumption forecasting, underscoring its universal relevance across domains [<xref ref-type="bibr" rid="ref44">44</xref>]. Nonetheless, normalization alone does not fully resolve sex prediction disparities, highlighting the need for targeted interventions that combine preprocessing with architectural optimization. 
Large-scale surveys of deep learning in medical imaging further emphasize that preprocessing and model design must be jointly considered to achieve equitable performance in forensic applications [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>].</p></sec><sec id="s4-5"><title>Interpretation and Implications</title><p>AI-assisted forensic odontology underscores the mandible as a resilient anatomical marker for sex prediction and age estimation when other skeletal elements are unavailable [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. Consistent with Abdelhamid and Desai [<xref ref-type="bibr" rid="ref19">19</xref>], our findings confirm that synthetic oversampling strategies, such as SMOTE, can effectively mitigate data imbalance and improve prediction robustness in limited radiographic datasets. This study also complements the work of Matsuda et al [<xref ref-type="bibr" rid="ref45">45</xref>], who demonstrated that multitask deep learning frameworks improve learning efficiency and generalization across medical imaging tasks.</p><p>Within forensic practice, these results emphasize the mandibular-focused, multitask CNN framework as a practical tool for postmortem identification and disaster victim assessment. By integrating data-balancing and normalization techniques, the proposed approach enhances interpretability, reproducibility, and scalability, paving the way for broader AI applications in forensic odontology and demographic estimation.</p></sec><sec id="s4-6"><title>Conclusions</title><p>This study demonstrates that cropped mandibular regions, particularly the mandibular length and angle, are reliable anatomical indicators for demographic prediction in forensic contexts. Among the CNN architectures evaluated, VGG16 and VGG19 consistently achieved superior accuracy and balanced sex prediction, confirming their suitability for forensic applications. 
MobileNetV2 benefited from oversampling strategies, while ResNet50V2 and InceptionV3 showed limited performance in male prediction, indicating the need for further refinement. The integration of robust CNN models with mandibular image analysis provides a scalable pathway for automated forensic identification, especially in disaster scenarios and resource-limited settings.</p></sec><sec id="s4-7"><title>Limitations</title><p>This study has several limitations that should be considered when interpreting the findings. First, the dataset was relatively small (680 images from 340 participants) and drawn from a single Indonesian population. This may limit the generalizability of the results to other ethnicities, age ranges, or geographic settings. Moreover, this constraint may increase the risk of overfitting in deep learning models. Future studies should consider using larger, multicenter datasets to enhance robustness and applicability. Second, the pronounced class imbalance, with a 3:1 female-to-male ratio, also influenced the model performance. In addition to sex imbalance, the age distribution was also uneven, with a strong concentration of samples in the age range of 16 to 25 years. This imbalance may have influenced age estimation performance across models and should be addressed in future studies using age-stratified sampling or regression-aware balancing strategies. Third, there are limitations related to methodological and practical considerations. Manual cropping of mandibular regions, despite clinical validation, introduces a degree of subjectivity that may affect reproducibility; automated landmark detection or segmentation methods could address this in future studies.</p><p>Fourth, the focus on only 2 mandibular parameters (length and angle) excludes other potentially informative craniofacial and dental features. 
Fifth, although VGG16 and VGG19 produced the strongest results, their higher computational demands may limit applicability in time-sensitive forensic workflows. Conversely, lightweight models such as MobileNetV2 offer greater efficiency but at reduced precision. Finally, the models were not evaluated under noisy or degraded imaging conditions common in postmortem or disaster settings, warranting future work on model robustness and real-life applications.</p></sec></sec></body><back><ack><p>During the preparation of this manuscript, the authors used Asus Copilot and the premium web version of ChatGPT (GPT-5.1; OpenAI) to assist in paraphrasing and improving the clarity of language. After using these tools, the authors manually reviewed, edited, and verified all content to ensure accuracy and originality and take full responsibility for the final version of the manuscript.</p></ack><notes><sec><title>Funding</title><p>This study did not receive external funding.</p></sec><sec><title>Data Availability</title><p>The data used in this study were obtained from the Dental and Mouth Hospital of Airlangga University. Data are available from the authors upon request and with permission from the Dental and Mouth Hospital of Airlangga University.</p></sec></notes><fn-group><fn fn-type="con"><p>VWH conceptualized the study, designed the methodology, performed data preprocessing, and developed the deep learning models. MSMAR contributed expertise in dental radiology and supervised methodological design. RR assisted in data analysis, statistical evaluation, and interpretation of results. AK provided technical implementation and optimization of convolutional neural network architectures. BAY supported dataset preparation, annotation, coding support, and validation of preprocessing steps. AY supervised the overall research process, ensured compliance with ethical standards, and critically reviewed the manuscript. 
All authors read and approved the final version of the manuscript.</p></fn><fn fn-type="other"><label>Disclosures</label><p>The authors declare that there are no financial interests, commercial affiliations, or other potential conflicts of interest that could have influenced the objectivity of this research or the writing of this paper.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb3">MAE</term><def><p>mean absolute error</p></def></def-item><def-item><term id="abb4">MAPE</term><def><p>mean absolute percentage error</p></def></def-item><def-item><term id="abb5">SMOTE</term><def><p>Synthetic Minority Oversampling Technique</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fidya</surname><given-names>F</given-names> </name><name name-style="western"><surname>Priyambadha</surname><given-names>B</given-names> </name></person-group><article-title>Automation of gender determination in human canines using artificial intelligence</article-title><source>Dent J (Maj Ked Gigi)</source><volume>50</volume><issue>3</issue><fpage>116</fpage><pub-id pub-id-type="doi">10.20473/j.djmkg.v50.i3.p116-120</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>M&#x00E2;nica</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gorza</surname><given-names>L</given-names> </name></person-group><article-title>Forensic odontology in the 21st century - identifying the opinions of those behind the 
teaching</article-title><source>J Forensic Leg Med</source><year>2019</year><month>05</month><volume>64</volume><fpage>7</fpage><lpage>13</lpage><pub-id pub-id-type="doi">10.1016/j.jflm.2019.03.006</pub-id><pub-id pub-id-type="medline">30878916</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Rompas</surname><given-names>E</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Iswara</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Ali</surname><given-names>A</given-names> </name><name name-style="western"><surname>Jamaluddin</surname></name></person-group><article-title>Metode identifikasi jenazah: primer dan sekunder</article-title><source>Pengantar Ilmu Kedokteran Forensik Dan Medikolegal</source><year>2023</year><publisher-name>Eureka Media Aksara</publisher-name><fpage>67</fpage><lpage>79</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://repository.penerbiteureka.com/media/publications/564333-pengantar-ilmu-kedokteran-forensik-dan-m-6000d5f2.pdf">https://repository.penerbiteureka.com/media/publications/564333-pengantar-ilmu-kedokteran-forensik-dan-m-6000d5f2.pdf</ext-link></comment><pub-id pub-id-type="other">9786231513076</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Heng</surname><given-names>D</given-names> </name><name name-style="western"><surname>Manica</surname><given-names>S</given-names> </name><name name-style="western"><surname>Franco</surname><given-names>A</given-names> </name></person-group><article-title>Forensic dentistry as an analysis tool for sex estimation: a review of current techniques</article-title><source>Res Rep Forensic Med 
Sci</source><year>2022</year><volume>12</volume><fpage>25</fpage><lpage>39</lpage><pub-id pub-id-type="doi">10.2147/RRFMS.S334796</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blau</surname><given-names>S</given-names> </name><name name-style="western"><surname>Briggs</surname><given-names>CA</given-names> </name></person-group><article-title>The role of forensic anthropology in disaster victim identification (DVI)</article-title><source>Forensic Sci Int</source><year>2011</year><month>02</month><day>25</day><volume>205</volume><issue>1-3</issue><fpage>29</fpage><lpage>35</lpage><pub-id pub-id-type="doi">10.1016/j.forsciint.2010.07.038</pub-id><pub-id pub-id-type="medline">20797826</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Patil</surname><given-names>V</given-names> </name><name name-style="western"><surname>Vineetha</surname><given-names>R</given-names> </name><name name-style="western"><surname>Vatsa</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Artificial neural network for gender determination using mandibular morphometric parameters: a comparative retrospective study</article-title><source>Cogent Eng</source><year>2020</year><month>01</month><volume>7</volume><issue>1</issue><pub-id pub-id-type="doi">10.1080/23311916.2020.1723783</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Breeland</surname><given-names>G</given-names> </name><name name-style="western"><surname>Aktar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>BC</given-names> </name></person-group><article-title>Anatomy, head 
and neck, mandible</article-title><source>StatPearls [Internet]</source><year>2024</year><publisher-name>StatPearls Publishing</publisher-name><pub-id pub-id-type="medline">30335325</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arifin</surname><given-names>R</given-names> </name><name name-style="western"><surname>Majedi</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Pertiwi</surname><given-names>FC</given-names> </name><name name-style="western"><surname>Sinay</surname><given-names>SN</given-names> </name></person-group><article-title>The relationship between facial shape and tooth shape ages 12-14 years old in South Daha</article-title><source>Dentino J Kedokt Gigi</source><year>2022</year><volume>7</volume><issue>2</issue><fpage>163</fpage><pub-id pub-id-type="doi">10.20527/dentino.v7i2.14624</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Coelho</surname><given-names>J</given-names> </name><name name-style="western"><surname>Armelim Almiro</surname><given-names>P</given-names> </name><name name-style="western"><surname>Nunes</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Sex and age biological variation of the mandible in a Portuguese population- a forensic and medico-legal approaches with three-dimensional analysis</article-title><source>Sci Justice</source><year>2021</year><month>11</month><volume>61</volume><issue>6</issue><fpage>704</fpage><lpage>713</lpage><pub-id pub-id-type="doi">10.1016/j.scijus.2021.08.004</pub-id><pub-id pub-id-type="medline">34802644</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Ogawa</surname><given-names>R</given-names> </name><name name-style="western"><surname>Ogura</surname><given-names>I</given-names> </name></person-group><article-title>AI-based computer-aided diagnosis for panoramic radiographs: quantitative analysis of mandibular cortical morphology in relation to age and gender</article-title><source>J Stomatol Oral Maxillofac Surg</source><year>2022</year><month>09</month><volume>123</volume><issue>4</issue><fpage>383</fpage><lpage>387</lpage><pub-id pub-id-type="doi">10.1016/j.jormas.2022.06.025</pub-id><pub-id pub-id-type="medline">35772701</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ningtyas</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Widyaningrum</surname><given-names>R</given-names> </name><name name-style="western"><surname>Shantiningsih</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Yanuaryska</surname><given-names>RD</given-names> </name></person-group><article-title>Sex estimation using angular measurements of nasion, sella, and glabella on lateral cephalogram among Indonesian adults in Yogyakarta</article-title><source>Egypt J Forensic Sci</source><year>2023</year><month>10</month><day>19</day><volume>13</volume><issue>1</issue><fpage>48</fpage><pub-id pub-id-type="doi">10.1186/s41935-023-00368-9</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kurniawan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sosiawan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Nurrahman</surname><given-names>TF</given-names> </name><etal/></person-group><article-title>Predicting sex from panoramic radiographs using 
mandibular morphometric analysis in Surabaya, Indonesia</article-title><source>Bull Int Assoc Paleodont</source><year>2023</year><access-date>2026-02-08</access-date><volume>17</volume><issue>1</issue><fpage>32</fpage><lpage>40</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.researchgate.net/publication/372078329_Predicting_sex_from_panoramic_radiographs_using_mandibular_morphometric_analysis_in_Surabaya_Indonesia">https://www.researchgate.net/publication/372078329_Predicting_sex_from_panoramic_radiographs_using_mandibular_morphometric_analysis_in_Surabaya_Indonesia</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elijah</surname><given-names>IE</given-names> </name><name name-style="western"><surname>Sunday</surname><given-names>GS</given-names> </name><name name-style="western"><surname>Wokpeogu</surname><given-names>CW</given-names> </name></person-group><article-title>Estimation of sex and stature using craniofacial variables in the Yoruba ethnic group of Nigeria</article-title><source>Saudi J Biomed Res</source><year>2021</year><access-date>2026-03-03</access-date><volume>6</volume><issue>5</issue><fpage>95</fpage><lpage>102</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://saudijournals.com/media/articles/SJBR_65_95-102.pdf">https://saudijournals.com/media/articles/SJBR_65_95-102.pdf</ext-link></comment><pub-id pub-id-type="doi">10.36348/sjbr.2021.v06i05.003</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arthanari</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sureshbabu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ramalingam</surname><given-names>K</given-names> </name><name 
name-style="western"><surname>Ravindran</surname><given-names>V</given-names> </name><name name-style="western"><surname>Prathap</surname><given-names>L</given-names> </name><name name-style="western"><surname>Sitaraman</surname><given-names>P</given-names> </name></person-group><article-title>Analyzing mandibular characteristics for age and gender variation through digital radiographic techniques: a retrospective study</article-title><source>Cureus</source><year>2024</year><month>04</month><volume>16</volume><issue>4</issue><fpage>e58500</fpage><pub-id pub-id-type="doi">10.7759/cureus.58500</pub-id><pub-id pub-id-type="medline">38765451</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hwang</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Park</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Moon</surname><given-names>JH</given-names> </name><etal/></person-group><article-title>Automated identification of cephalometric landmarks: part 2-might it be better than human?</article-title><source>Angle Orthod</source><year>2020</year><month>01</month><volume>90</volume><issue>1</issue><fpage>69</fpage><lpage>76</lpage><pub-id pub-id-type="doi">10.2319/022019-129.1</pub-id><pub-id pub-id-type="medline">31335162</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Handayani</surname><given-names>VW</given-names> </name><name name-style="western"><surname>Yudianto</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sylvia</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Riries</surname><given-names>R</given-names> </name><name 
name-style="western"><surname>Caesarardhi</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Putra</surname><given-names>R</given-names> </name></person-group><article-title>The potential of synthetic minority oversampling technique to enhance the precision of gender prediction: an investigation of artificial neural networks with cephalometry</article-title><source>Russian J Forensic Med</source><year>2024</year><month>06</month><day>7</day><volume>10</volume><issue>2</issue><fpage>139</fpage><lpage>151</lpage><pub-id pub-id-type="doi">10.17816/fm16110</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Russell</surname><given-names>S</given-names> </name><name name-style="western"><surname>Norvig</surname><given-names>P</given-names> </name></person-group><source>Artificial Intelligence: A Modern Approach</source><year>2020</year><edition>4</edition><publisher-name>Pearson</publisher-name></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mohammad</surname><given-names>N</given-names> </name><name name-style="western"><surname>Ahmad</surname><given-names>R</given-names> </name><name name-style="western"><surname>Kurniawan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mohd Yusof</surname><given-names>MYP</given-names> </name></person-group><article-title>Applications of contemporary artificial intelligence technology in forensic odontology as primary forensic identifier: A scoping review</article-title><source>Front Artif Intell</source><year>2022</year><month>12</month><day>6</day><volume>5</volume><fpage>1049584</fpage><pub-id pub-id-type="doi">10.3389/frai.2022.1049584</pub-id><pub-id pub-id-type="medline">36561660</pub-id></nlm-citation></ref><ref 
id="ref19"><label>19</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Abdelhamid</surname><given-names>M</given-names> </name><name name-style="western"><surname>Desai</surname><given-names>A</given-names> </name></person-group><article-title>Balancing the scales: a comprehensive study on tackling class imbalance in binary classification</article-title><source>arXiv</source><comment>Preprint posted online on  Sep 29, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2409.19751</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gau</surname><given-names>G</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>M</given-names> </name></person-group><article-title>Using machine learning to determine the efficacy of socio-economic indicators as predictors for flood risk in London</article-title><source>Rev Int G&#x00E9;omatique</source><year>2024</year><month>10</month><day>25</day><volume>33</volume><fpage>427</fpage><lpage>443</lpage><pub-id pub-id-type="doi">10.32604/rig.2024.055752</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hinojosa Lee</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Braet</surname><given-names>J</given-names> </name><name name-style="western"><surname>Springael</surname><given-names>J</given-names> </name></person-group><article-title>Performance metrics for multilabel emotion classification: comparing micro, macro, and weighted F1-scores</article-title><source>Appl Sci (Basel)</source><year>2024</year><month>10</month><day>28</day><volume>14</volume><issue>21</issue><fpage>9863</fpage><pub-id 
pub-id-type="doi">10.3390/app14219863</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Prabha</surname><given-names>PS</given-names> </name><name name-style="western"><surname>Ganesan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lakshmi</surname><given-names>KC</given-names> </name><name name-style="western"><surname>Murugan</surname><given-names>AJ</given-names> </name></person-group><article-title>Sex determination through analysis of mandibular indices using lateral cephalogram: an artificial intelligence diagnostics</article-title><source>Discov Artif Intell</source><year>2025</year><volume>5</volume><fpage>108</fpage><pub-id pub-id-type="doi">10.1007/s44163-025-00371-0</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gao</surname><given-names>F</given-names> </name><name name-style="western"><surname>Tang</surname><given-names>Y</given-names> </name></person-group><article-title>Multimodal deep learning for cephalometric landmark detection and treatment prediction</article-title><source>Sci Rep</source><year>2025</year><month>07</month><day>12</day><volume>15</volume><issue>1</issue><fpage>25205</fpage><pub-id pub-id-type="doi">10.1038/s41598-025-06229-w</pub-id><pub-id pub-id-type="medline">40651957</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Perez</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name></person-group><article-title>The effectiveness of data augmentation in image classification using deep learning</article-title><source>arXiv</source><comment>Preprint 
posted online on  Dec 13, 2017</comment><pub-id pub-id-type="doi">10.48550/arXiv.1712.04621</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Berends</surname><given-names>B</given-names> </name><name name-style="western"><surname>Bielevelt</surname><given-names>F</given-names> </name><name name-style="western"><surname>Schreurs</surname><given-names>R</given-names> </name><name name-style="western"><surname>Vinayahalingam</surname><given-names>S</given-names> </name><name name-style="western"><surname>Maal</surname><given-names>T</given-names> </name><name name-style="western"><surname>de Jong</surname><given-names>G</given-names> </name></person-group><article-title>Fully automated landmarking and facial segmentation on 3D photographs</article-title><source>arXiv</source><comment>Preprint posted online on  Sep 19, 2023</comment><pub-id pub-id-type="doi">10.21203/rs.3.rs-3626264/v1</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mali</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mishra</surname><given-names>D</given-names> </name><name name-style="western"><surname>Vijayalaxmi</surname><given-names>M</given-names> </name></person-group><article-title>Benchmarking for recommender system (MFRISE)</article-title><source>3C TIC</source><year>2022</year><volume>11</volume><issue>2</issue><fpage>146</fpage><lpage>156</lpage><pub-id pub-id-type="doi">10.17993/3ctic.2022.112.146-156</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fayyaz</surname><given-names>Z</given-names> </name><name 
name-style="western"><surname>Ebrahimian</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nawara</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ibrahim</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kashef</surname><given-names>R</given-names> </name></person-group><article-title>Recommendation systems: algorithms, challenges, metrics, and business opportunities</article-title><source>Appl Sci (Basel)</source><year>2020</year><month>11</month><day>2</day><volume>10</volume><issue>21</issue><fpage>7748</fpage><pub-id pub-id-type="doi">10.3390/app10217748</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>Y</given-names> </name></person-group><article-title>Analysis of the mean absolute error (MAE) and the root mean square error (RMSE) in assessing rounding model</article-title><source>IOP Conf Ser Mater Sci Eng</source><year>2018</year><month>03</month><day>1</day><volume>324</volume><issue>1</issue><fpage>012049</fpage><pub-id pub-id-type="doi">10.1088/1757-899X/324/1/012049</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morley</surname><given-names>SK</given-names> </name><name name-style="western"><surname>Brito</surname><given-names>TV</given-names> </name><name name-style="western"><surname>Welling</surname><given-names>DT</given-names> </name></person-group><article-title>Measures of model performance based on the log accuracy ratio</article-title><source>Space Weather</source><year>2018</year><month>01</month><volume>16</volume><issue>1</issue><fpage>69</fpage><lpage>88</lpage><pub-id 
pub-id-type="doi">10.1002/2017SW001669</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fernandez</surname><given-names>A</given-names> </name><name name-style="western"><surname>Garcia</surname><given-names>S</given-names> </name><name name-style="western"><surname>Herrera</surname><given-names>F</given-names> </name><name name-style="western"><surname>Chawla</surname><given-names>NV</given-names> </name></person-group><article-title>SMOTE for learning from imbalanced data: progress and challenges, marking the 15-year anniversary</article-title><source>J Artif Intell Res</source><year>2018</year><volume>61</volume><issue>1</issue><fpage>863</fpage><lpage>905</lpage><pub-id pub-id-type="doi">10.1613/jair.1.11192</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chawla</surname><given-names>NV</given-names> </name><name name-style="western"><surname>Bowyer</surname><given-names>KW</given-names> </name><name name-style="western"><surname>Hall</surname><given-names>LO</given-names> </name><name name-style="western"><surname>Kegelmeyer</surname><given-names>WP</given-names> </name></person-group><article-title>SMOTE: synthetic minority over-sampling technique</article-title><source>J Artif Intell Res</source><year>2002</year><month>06</month><day>1</day><volume>16</volume><issue>1</issue><fpage>321</fpage><lpage>357</lpage><pub-id pub-id-type="doi">10.1613/jair.953</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ioffe</surname><given-names>S</given-names> </name><name name-style="western"><surname>Szegedy</surname><given-names>C</given-names> </name></person-group><article-title>Batch 
normalization: accelerating deep network training by reducing internal covariate shift</article-title><source>PMLR</source><year>2015</year><access-date>2026-02-09</access-date><volume>37</volume><fpage>448</fpage><lpage>456</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.mlr.press/v37/ioffe15.html">https://proceedings.mlr.press/v37/ioffe15.html</ext-link></comment></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Yudhantorro</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Aulia Vinarti</surname><given-names>R</given-names> </name><name name-style="western"><surname>Handayani</surname><given-names>VW</given-names> </name><name name-style="western"><surname>Anggraeni</surname><given-names>W</given-names> </name><name name-style="western"><surname>Muklason</surname><given-names>A</given-names> </name></person-group><article-title>Age and sex prediction from cervical vertebrae cephalogram image using convolutional neural network model</article-title><conf-name>2024 International Seminar on Intelligent Technology and Its Applications (ISITIA)</conf-name><conf-date>Jul 10-12, 2024</conf-date><pub-id pub-id-type="doi">10.1109/ISITIA63062.2024.10668169</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Litjens</surname><given-names>G</given-names> </name><name name-style="western"><surname>Kooi</surname><given-names>T</given-names> </name><name name-style="western"><surname>Bejnordi</surname><given-names>BE</given-names> </name><etal/></person-group><article-title>A survey on deep learning in medical image analysis</article-title><source>Med Image Anal</source><year>2017</year><month>12</month><volume>42</volume><fpage>60</fpage><lpage>88</lpage><pub-id 
pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id><pub-id pub-id-type="medline">28778026</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lundervold</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Lundervold</surname><given-names>A</given-names> </name></person-group><article-title>An overview of deep learning in medical imaging focusing on MRI</article-title><source>Z Med Phys</source><year>2019</year><month>05</month><volume>29</volume><issue>2</issue><fpage>102</fpage><lpage>127</lpage><pub-id pub-id-type="doi">10.1016/j.zemedi.2018.11.002</pub-id><pub-id pub-id-type="medline">30553609</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Franco</surname><given-names>A</given-names> </name><name name-style="western"><surname>Porto</surname><given-names>L</given-names> </name><name name-style="western"><surname>Heng</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Diagnostic performance of convolutional neural networks for dental sexual dimorphism</article-title><source>Sci Rep</source><year>2022</year><month>10</month><day>14</day><volume>12</volume><issue>1</issue><fpage>17279</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-21294-1</pub-id><pub-id pub-id-type="medline">36241670</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elreedy</surname><given-names>D</given-names> </name><name name-style="western"><surname>Atiya</surname><given-names>AF</given-names> </name><name name-style="western"><surname>Kamalov</surname><given-names>F</given-names> </name></person-group><article-title>A theoretical distribution analysis 
of synthetic minority oversampling technique (SMOTE) for imbalanced learning</article-title><source>Mach Learn</source><year>2024</year><month>07</month><volume>113</volume><issue>7</issue><fpage>4903</fpage><lpage>4923</lpage><pub-id pub-id-type="doi">10.1007/s10994-022-06296-4</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Matharaarachchi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Domaratzki</surname><given-names>M</given-names> </name><name name-style="western"><surname>Muthukumarana</surname><given-names>S</given-names> </name></person-group><article-title>Enhancing SMOTE for imbalanced data with abnormal minority instances</article-title><source>Mach Learn Appl</source><year>2024</year><month>12</month><volume>18</volume><fpage>100597</fpage><pub-id pub-id-type="doi">10.1016/j.mlwa.2024.100597</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Turlapati</surname><given-names>VP</given-names> </name><name name-style="western"><surname>Prusty</surname><given-names>MR</given-names> </name></person-group><article-title>Outlier-SMOTE: a refined oversampling technique for improved detection of COVID-19</article-title><source>Intell Based Med</source><year>2020</year><month>12</month><volume>3</volume><fpage>100023</fpage><pub-id pub-id-type="doi">10.1016/j.ibmed.2020.100023</pub-id><pub-id pub-id-type="medline">33289013</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Duan</surname><given-names>F</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Yan</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Cai</surname><given-names>Z</given-names> </name></person-group><article-title>An oversampling method of unbalanced data for mechanical fault diagnosis based on MeanRadius-SMOTE</article-title><source>Sensors (Basel)</source><year>2022</year><month>07</month><day>10</day><volume>22</volume><issue>14</issue><fpage>5166</fpage><pub-id pub-id-type="doi">10.3390/s22145166</pub-id><pub-id pub-id-type="medline">35890845</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Brownlee</surname><given-names>J</given-names> </name></person-group><article-title>How to use StandardScaler and MinMaxScaler transforms in python</article-title><source>Machine Learning Mastery</source><year>2020</year><access-date>2024-09-12</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://machinelearningmastery.com/standardscaler-and-minmaxscaler-transforms-in-python/">https://machinelearningmastery.com/standardscaler-and-minmaxscaler-transforms-in-python/</ext-link></comment></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Raju</surname><given-names>VN</given-names> </name><name name-style="western"><surname>Lakshmi</surname><given-names>KP</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>VM</given-names> </name><name name-style="western"><surname>Kalidindi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Padma</surname><given-names>V</given-names> </name></person-group><article-title>Study the influence of normalization/transformation process on the accuracy of supervised classification</article-title><conf-name>2020 Third International Conference on Smart 
Systems and Inventive Technology (ICSSIT)</conf-name><conf-date>Aug 20-22, 2020</conf-date><pub-id pub-id-type="doi">10.1109/ICSSIT48917.2020.9214160</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>TH</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>AN</given-names> </name></person-group><article-title>Fused RGB and IR image based deep learning detection of dried laver bugak for robotic automation systems</article-title><source>Sci Rep</source><year>2025</year><month>08</month><day>28</day><volume>15</volume><issue>1</issue><fpage>31732</fpage><pub-id pub-id-type="doi">10.1038/s41598-025-16563-8</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Singha</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kumar</surname><given-names>S</given-names> </name></person-group><article-title>Artificial intelligence in age and sex determination using maxillofacial radiographs: a systematic review</article-title><source>J Forensic Odontostomatol</source><year>2024</year><month>04</month><day>30</day><volume>42</volume><issue>1</issue><fpage>30</fpage><lpage>37</lpage><pub-id pub-id-type="doi">10.5281/zenodo.11088513</pub-id><pub-id pub-id-type="medline">38742570</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Matsuda</surname><given-names>S</given-names> </name><name name-style="western"><surname>Miyamoto</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Yoshimura</surname><given-names>H</given-names> </name><name name-style="western"><surname>Hasegawa</surname><given-names>T</given-names> </name></person-group><article-title>Personal identification with orthopantomography using simple convolutional neural networks: a preliminary study</article-title><source>Sci Rep</source><year>2020</year><month>08</month><day>11</day><volume>10</volume><issue>1</issue><fpage>13559</fpage><pub-id pub-id-type="doi">10.1038/s41598-020-70474-4</pub-id><pub-id pub-id-type="medline">32782269</pub-id></nlm-citation></ref></ref-list></back></article>