<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v5i1e84305</article-id><article-id pub-id-type="doi">10.2196/84305</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>AI Applications Integrating Legal and Regulatory Perspectives in Mental Health: Systematic Review</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Kandeel</surname><given-names>Moustafa Elmetwaly</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Abo Hamza</surname><given-names>Eid G</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Abouahmed</surname><given-names>Alaa</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" 
rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>AbdelAziz</surname><given-names>Gehad Mohamed</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Hashish</surname><given-names>Adham</given-names></name><degrees>SJD</degrees><xref ref-type="aff" rid="aff5">5</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Abo El Wafa</surname><given-names>Tarek</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Khalil</surname><given-names>Ahmed</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Eldakak</surname><given-names>Ahmed</given-names></name><degrees>JSD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>College of Law, Al Ain University</institution><addr-line>Al Ain</addr-line><country>United Arab Emirates</country></aff><aff id="aff2"><institution>Family Counseling Program, College of Arts, Humanities and Social Sciences, University of Sharjah</institution><addr-line>Sharjah</addr-line><country>United Arab Emirates</country></aff><aff id="aff3"><institution>Department of Mental Health, Faculty of Education, Tanta University</institution><addr-line>Tanta</addr-line><country>Egypt</country></aff><aff id="aff4"><institution>College of Law, United Arab Emirates 
University</institution><addr-line>UAE University Main Campus, Building H2, Office 2027, Sheikh Khalifa Street, Asharij</addr-line><addr-line>Al Ain</addr-line><country>United Arab Emirates</country></aff><aff id="aff5"><institution>Institute of Public Administration</institution><addr-line>Riyadh</addr-line><country>Saudi Arabia</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Raisaro</surname><given-names>Jean-Louis</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Najand</surname><given-names>Babak</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Mahdy</surname><given-names>Elsoghair</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Ahmed Eldakak, JSD, College of Law, United Arab Emirates University, UAE University Main Campus, Building H2, Office 2027, Sheikh Khalifa Street, Asharij, Al Ain, 15551, United Arab Emirates, 971 505331794; <email>a.eldakak@uaeu.ac.ae</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>27</day><month>4</month><year>2026</year></pub-date><volume>5</volume><elocation-id>e84305</elocation-id><history><date date-type="received"><day>17</day><month>09</month><year>2025</year></date><date date-type="rev-recd"><day>27</day><month>01</month><year>2026</year></date><date date-type="accepted"><day>07</day><month>03</month><year>2026</year></date></history><copyright-statement>&#x00A9; Moustafa Elmetwaly Kandeel, Eid G Abo Hamza, Alaa Abouahmed, Gehad Mohamed AbdelAziz, Adham Hashish, Tarek Abo El Wafa, Ahmed Khalil, Ahmed Eldakak. 
Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 27.4.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2026/1/e84305"/><abstract><sec><title>Background</title><p>Artificial intelligence (AI) offers new methods to improve diagnosis and treatment in mental health. However, its use raises legal and ethical concerns.</p></sec><sec><title>Objective</title><p>AI is increasingly being used for mental health care, but its clinical prominence and ethical implications are yet to be determined. This systematic review discusses the clinical efficacy and the ethical issues of AI in mental health treatment and is trying to focus on the main conclusions with regard to the diagnostic accuracy and the therapeutic efficacy.</p></sec><sec sec-type="methods"><title>Methods</title><p>The review encompasses an exhaustive analysis of 35 studies in the narrow time frame of 2013&#x2010;2024. It allows for multidatabase exploration and follows the systematic and well-established practice of PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 guidelines. 
This review searched PubMed (biomedical emphasis), IEEE Xplore (engineering or AI), PsycINFO (psychological literature), Scopus (multidisciplinary focus), and Cochrane Library (evidence-based treatment) from January 1, 2013, to December 31, 2024. Studies include those that focused on AI applications for diagnosis, treatment, or patient engagement, excluding tangential uses (eg, administrative tasks). Only English-language publications were searched to mitigate language bias, though this introduces potential geographic bias.</p></sec><sec sec-type="results"><title>Results</title><p>AI-enabled interventions using natural language processing models showed up to 89% accuracy for depression detection. Wearables, such as the Empatica E4, showed an <italic>F</italic><sub>1</sub>-score of 0.81 to predict anxiety episodes. AI-enabled therapies, such as chat-based interventions and online cognitive behavioral therapy, have been shown to improve anxiety symptoms by about 30% in some studies; however, there was considerable variability in the impact based on study design, intervention duration, and comparator conditions, as well as the overall methodological quality of the studies. 
However, challenges remain, including biases in training data, evidenced by performance declines of up to 15% in non-English datasets, and concerns over data privacy.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>AI has the potential to revolutionize mental health treatment, offering cost-saving, personalized, and culturally sensitive interventions while protecting privacy, equity, and human agency.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>mental health</kwd><kwd>diagnosis</kwd><kwd>therapeutic interventions</kwd><kwd>patient autonomy</kwd><kwd>data privacy</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The global challenge for the 21st century is mental health conditions. According to an estimate by the World Health Organization (WHO), 970 million people worldwide have mental health or substance use disorders; anxiety and depression alone cost the world economy well over US $1 trillion in lost productivity each year [<xref ref-type="bibr" rid="ref1">1</xref>]. Although interventions on mental health treatments have undergone a psychologically and pharmaceutically evolving process in the last decades, gaps remain in diagnosis, access to treatment, and patient activation [<xref ref-type="bibr" rid="ref2">2</xref>]. Contemporary practices remain highly reliant on the clinician&#x2019;s report, face-to-face therapy, and trial-and-error use of medication, all of which are hampered by resource constraints, stigma, and system inefficiency [<xref ref-type="bibr" rid="ref2">2</xref>].</p><p>The introduction of artificial intelligence (AI) technologies has revolutionized the ways of handling health care challenges. 
On the other hand, AI greatly impacts human rights issues, which require tighter control under medical ethical policies [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. In addition, AI may threaten personal privacy and affect human autonomy in the decision-making process. The General Data Protection Regulation (GDPR) guarantees lawful processing by consent, privacy, and data minimization. For example, AI could be a threat to personal privacy and could affect the autonomy of human decision-making. To monitor these problems, in the United States, the Health Insurance Portability and Accountability Act (HIPAA) focuses on the confidentiality of health information of the patients. Similarly, in the European Union, the GDPR is the law governing how personal data, including health data, may be processed.</p><p>Apart from the legal issues, AI has revolutionary perks to treat patients. With machine learning, natural language processing (NLP), and predictive analytics, AI-enabled interventions are capable of analyzing massive amounts of health-related data from wearables&#x2014;everything they collect on a metric level to psychiatric data within electronic health records (EHRs). From these data, insights can be derived beyond what clinicians are able to observe themselves [<xref ref-type="bibr" rid="ref5">5</xref>]. NLP algorithms, for instance, have been found to detect early signs of depression in social media linguistic signs [<xref ref-type="bibr" rid="ref6">6</xref>]. Further, for anxiety attacks, predictions may be made using AI-enabled wearables that monitor physiological signals such as heart variability [<xref ref-type="bibr" rid="ref7">7</xref>]. 
These innovations hold potential to fill the gaps in the treatment of mental health in low-resource settings where the availability of mental health specialists is low [<xref ref-type="bibr" rid="ref8">8</xref>].</p><p>Besides improving the diagnosis, AI is also changing the therapeutic intervention. Randomized controlled trials have found that chatbots like Woebot are effective at reducing depression and anxiety symptoms by delivering cognitive behavioral therapy (CBT) through text-based interfaces [<xref ref-type="bibr" rid="ref9">9</xref>]. Likewise, predictive models based on AI are helping in creating a personalized treatment plan through predictions in relation to an individual&#x2019;s respective response to antidepressants [<xref ref-type="bibr" rid="ref10">10</xref>]. Besides clinical interventions, AI-enabled interventions empower people using patient-led management platforms. These interventions offer real-time feedback, psychoeducation, and peer support for improved patient empowerment and reduced use of the overwhelmed health care infrastructure [<xref ref-type="bibr" rid="ref11">11</xref>]. However, there are ethical, regulatory, and practical concerns with integrating AI into mental health practice [<xref ref-type="bibr" rid="ref12">12</xref>]. Data privacy issues, algorithmic bias, and dehumanizing treatment are only a few of the problems that continue to exist if AI is trained through underrepresented data from minority groups [<xref ref-type="bibr" rid="ref13">13</xref>]. Further, regulatory avenues for the validation and implementation of AI remain absent, which raises questions about accountability and clinical supervision [<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>This paper reviews evidence for AI&#x2019;s role in mental health diagnosis, treatment, and patient empowerment, critically evaluating strengths, weaknesses, and ethical implications. 
The PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) 2020 guidelines are followed in this study, which promotes the transparent and complete reporting of systematic literature review. PRISMA 2020 guidelines are a recent major update of the PRISMA that includes more comprehensive guidance on the reporting of methods, terminology, and results. This review also proposes a legal framework and guidelines to protect human privacy in AI applications for medical science. From a review of current decade peer-reviewed published research, this systematic review aims to provide a guideline for policymakers, clinicians, and researchers for implementing AI&#x2019;s potential while maintaining patient rights and equity.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>This review follows the guidelines of the PRISMA 2020 as followed by Page et al [<xref ref-type="bibr" rid="ref15">15</xref>]. PRISMA is an evidence-based approach to enhance transparency, reproducibility, and methodological rigor in systematic reviews. PRISMA emphasizes organized reporting at key stages such as search strategy, study selection, data extraction, and synthesis in order to minimize bias and ensure accountability.</p></sec><sec id="s2-2"><title>Study Screening and Selection Process</title><p>The study selection process followed a rigorous and systematic screening process with multiple stages. Initially, a total of 2534 studies were retrieved from 5 databases (PubMed, IEEE Xplore, PsycINFO, Scopus, and Cochrane Library) using predefined search terms (read Section 2.1 for search strategy). After removing duplicates and applying exclusion criteria, 35 studies were selected for final inclusion. 
The selection criteria focused on empirical studies that explored the use of AI in mental health diagnosis, treatment, or patient engagement, excluding studies that were not peer-reviewed or did not focus on relevant applications.</p><p>For this review, 2 independent reviewers were selected who performed the screening or selection process. The reviewers took care of the reliability and validity of the inclusion criteria. In cases of dispute, a third reviewer was consulted to settle the dispute. The Cohen &#x03BA; statistic was used to determine the level of interrater agreement. The Cohen &#x03BA; score was 0.84, with substantial agreement between the reviewers in the selection of studies.</p></sec><sec id="s2-3"><title>Rationale for Database Selection</title><p>To be inclusive of interdisciplinary knowledge, this review searched PubMed (biomedical emphasis), IEEE Xplore (engineering or AI), PsycINFO (psychological literature), Scopus (multidisciplinary focus), and Cochrane Library (evidence-based treatment). This strategy ensures comprehensive representation of technical, clinical, and ethical aspects of AI in mental health, as guided by Grant and Booth [<xref ref-type="bibr" rid="ref16">16</xref>]. 
Research published between January 2013 and December 2024 were considered for capturing updates in recent AI applications.</p><p>Search terms combined Boolean operators and Medical Subject Headings such as (&#x201C;artificial intelligence&#x201D; OR &#x201C;machine learning&#x201D; OR &#x201C;deep learning&#x201D; OR &#x201C;neural network&#x201D;) AND (&#x201C;mental health&#x201D; OR &#x201C;mental disorder&#x201D; OR &#x201C;depression&#x201D; OR &#x201C;anxiety&#x201D; OR &#x201C;schizophrenia&#x201D; OR &#x201C;PTSD&#x201D;) AND (&#x201C;diagnosis&#x201D; OR &#x201C;screening&#x201D; OR &#x201C;treatment&#x201D; OR &#x201C;therapy&#x201D; OR &#x201C;patient empowerment&#x201D; OR &#x201C;self-management&#x201D;) for example (&#x201C;artificial intelligence&#x201D; OR &#x201C;machine learning&#x201D;) AND (&#x201C;mental health&#x201D;) OR &#x201C;depression&#x201D; OR &#x201C;anxiety&#x201D; AND (&#x201C;diagnosis&#x201D; OR &#x201C;therapy&#x201D; OR &#x201C;patient empowerment&#x201D;). It also combined various keywords related to ethical and legal challenges such as &#x201C;artificial intelligence&#x201D; OR &#x201C;machine learning&#x201D; OR &#x201C;deep learning&#x201D; OR &#x201C;neural network&#x201D; AND &#x201C;personal data&#x201D; OR &#x201C;ethics&#x201D; OR &#x201C;legal protection&#x201D; OR &#x201C;HIPAA.&#x201D; Search terms were iteratively refined using the Sample, Phenomenon of Interest, Design, Evaluation, Research Type framework to align with qualitative and quantitative research goals [<xref ref-type="bibr" rid="ref17">17</xref>].</p></sec><sec id="s2-4"><title>Inclusion and Exclusion Criteria</title><p>In this review, only the peer-reviewed empirical studies were included to ensure methodological rigor and peer validation [<xref ref-type="bibr" rid="ref18">18</xref>]. Moreover, the study focused on studies where AI was central to diagnosis, treatment, or patient engagement, excluding tangential uses (eg, administrative tasks). 
Only English-language publications were searched to mitigate language bias, though this introduces potential geographic bias [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>The study did not include nonempirical papers (ie, editorials) that lack testable hypotheses or evidence, which contravenes the empiricism principle underpinning systematic reviews. The study did not include interventions irrelevant to human mental health either.</p></sec><sec id="s2-5"><title>Risk-of-Bias Assessment</title><p>To assess the methodological quality and potential risk of bias in the included studies, the Risk of Bias-2 for intervention trials was used. The results of these assessments were summed to classify studies into 3 categories: low, moderate, or high risk of bias. In total, 9 (26%) studies were rated as having a low risk of bias, while 21 (60%) studies were deemed to have a moderate risk of bias, and the remaining 5 (14%) studies were classified as high risk.</p></sec><sec id="s2-6"><title>Literature Characteristics of the Included Systematic Review</title><p>Descriptive data from the included studies were examined based on the publication period, countries of origin, AI modality, mental health focus, methodological quality, outcome indicators, and outcome relevance and ethics. All parameters were extracted and organized using Microsoft Excel 365, which is shown in <xref ref-type="table" rid="table1">Table 1</xref>. In total, 35 research studies were included in this systematic review. These studies were published from 2013 to 2024. The publication record increased after 2018, with 23 of 35 (66%) of the papers appearing in the past 6 years. Based upon the first-author affiliations, the majority of studies were from the United States (12/35), followed by China (7/35), the United Kingdom (5/35), Australia (4/35), and Canada (3/35). In total, these studies account for approximately 89% (31/35) of all publications. 
All studies were published in English, reflecting the language inclusion eligibility criterion.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Statistical analysis of included studies.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Domain and item</td><td align="left" valign="bottom">Values, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Publication period</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2013&#x2010;2017</td><td align="left" valign="top">12 (34)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2018&#x2010;2024</td><td align="left" valign="top">23 (66)</td></tr><tr><td align="left" valign="top" colspan="2">Countries (based upon the first author)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>United States</td><td align="left" valign="top">12 (34)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>China</td><td align="left" valign="top">7 (20)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>United Kingdom</td><td align="left" valign="top">5 (14)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Australia</td><td align="left" valign="top">4 (11)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Canada</td><td align="left" valign="top">3 (9)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other countries</td><td 
align="left" valign="top">4 (11)</td></tr><tr><td align="left" valign="top" colspan="2">Artificial intelligence modality</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Natural language processing</td><td align="left" valign="top">14 (40)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Wearable or smartphone sensors</td><td align="left" valign="top">9 (26)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Chat-based conversational agents</td><td align="left" valign="top">7 (20)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Multimodal deep-learning frameworks</td><td align="left" valign="top">5 (14)</td></tr><tr><td align="left" valign="top" colspan="2">Focusing on the mental health situation</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Major depressive disorder</td><td align="left" valign="top">18 (51)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Suicide ideation or risk</td><td align="left" valign="top">12 (34)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Bipolar or schizophrenia or anxiety (combined)</td><td align="left" valign="top">5 (14)</td></tr><tr><td align="left" valign="top" colspan="2">Methodological and reporting quality</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Funding declared</td><td align="left" valign="top">27 (77)</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Prospective protocol registered</td><td align="left" valign="top">5 (14)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>PRISMA<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> 2020 adherence stated</td><td align="left" valign="top">7 (20)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Any risk-of-bias assessment</td><td align="left" valign="top">21 (60)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Low risk of bias overall</td><td align="left" valign="top">9 (26)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Evidence graded with GRADE<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">4 (11)</td></tr><tr><td align="left" valign="top" colspan="2">Outcome relevance and ethics</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Demonstrated improved outcomes</td><td align="left" valign="top">25 (71)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Addressed legal or regulatory compliance</td><td align="left" valign="top">6 (17)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Conducted fairness or bias audits</td><td align="left" valign="top">7 (20)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses.</p></fn><fn id="table1fn2"><p><sup>b</sup>GRADE: Grading of Recommendations Assessment, 
Development and Evaluation. </p></fn></table-wrap-foot></table-wrap><p>As far as AI modality is concerned in past studies, NLP on social media or clinical text dominated the field and accounts for nearly 40% (14/35) of the studies. This is followed by wearable or smartphone sensor analytics (9/35, 26%), chat-based conversational agents (7/35, 20%), and multimodal deep-learning frameworks that integrate images and text. These multimodal frameworks account for the remaining 14% (5/35) of the studies. The most frequently addressed mental health conditions were major depressive disorder, which accounts for 51% (18/35), and suicide ideation or risk, making up 34% (12/35), with smaller but growing bodies of work on bipolar disorder, schizophrenia, and anxiety disorders.</p><p>Furthermore, methodological and reporting quality were also investigated in this review. In total, 77% (27/35) of the studies declared funding support, while only 14% (5/35) reported prospective protocol registration (eg, PROSPERO or OSF). Similarly, adherence to PRISMA 2020 was explicitly stated in 20% (7/35) of the studies. The statement of risk-of-bias assessments was present in 60% (21/35) of the papers, primarily using Risk of Bias-2 for intervention trials, Quality Assessment of Diagnostic Accuracy Studies (version 2) for diagnostic accuracy studies, or the Newcastle-Ottawa Scale for observational cohorts. Only 26% (9/35) of the included studies were rated as having an overall low risk of bias.</p><p>Across the included studies, the performance metrics were heterogeneous. In total, 71% (25/35) reported statistically or clinically meaningful improvements in diagnostic accuracy, treatment prediction, or patient-reported outcomes compared with conventional methods. 
However, only 17% (6/35) explicitly addressed legal or regulatory compliance frameworks (eg, GDPR), and 20% (7/35) conducted fairness or bias audits.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>This study adopted PRISMA 2020 guidelines for addressing AI applications in mental health. The step-by-step PRISMA-based process is illustrated in <xref ref-type="fig" rid="figure1">Figure 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Systematic review process based upon PRISMA to scrutinize relevant papers. AI: artificial intelligence; PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e84305_fig01.png"/></fig><sec id="s3-1"><title>AI in Diagnosis</title><sec id="s3-1-1"><title>NLP Applications: Social Media or Text Analysis</title><p>NLP has proven to be a significant tool for detecting mental health disorders using linguistic patterns across social media posts, clinical records, and online discussions. Early efforts by Gkotsis et al [<xref ref-type="bibr" rid="ref6">6</xref>] illustrated the effectiveness of deep learning models such as long short-term memory in identifying depression on Reddit. The study finds a score of 89% accuracy when evaluating syntactic complexity and polarity of sentiment. This methodology was later confirmed by De Choudhury et al [<xref ref-type="bibr" rid="ref20">20</xref>], who applied the modeling latent Dirichlet allocation and support vector machine classifiers to forecast depression onset in Twitter users 3 months before clinical diagnosis (area under the curve [AUC]=0.85). The study used features like social isolation and first-person pronoun applications, which have been identified as a marker. 
Coppersmith et al [<xref ref-type="bibr" rid="ref21">21</xref>] created an NLP pipeline that could find high-risk individuals on Twitter (AUC=0.92) through the identification of phrases such as &#x201C;can&#x2019;t go on&#x201D; and contextual emotional exhaustion. However, such models are subjected to serious cross-cultural generalizability problems. Harrigian et al [<xref ref-type="bibr" rid="ref22">22</xref>] reported reduced accuracy of English language&#x2013;trained Bidirectional Encoder Representations from Transformers&#x2013;based models by 15% when evaluated in Mandarin or Spanish posts, which makes the need for a multilingual training corpus.</p><p>Recent developments strive to reduce these biases. A study by Liu et al [<xref ref-type="bibr" rid="ref23">23</xref>] trained multilingual transformers such as mental health forums in 12 languages, matching performance (<italic>F</italic><sub>1</sub>=0.82) to monolingual models. In addition to depression and suicidality, NLP has been used for detecting anxiety. Guntuku et al [<xref ref-type="bibr" rid="ref24">24</xref>] integrated Linguistic Inquiry and Word Count lexicons with random forests for the identification of &#x201C;worry&#x201D; and &#x201C;rumination&#x201D; patterns in Reddit and Facebook posts. Although these achievements have been made, the ethical issues have persisted. Liu et al [<xref ref-type="bibr" rid="ref23">23</xref>] cautioned that passive social media surveillance threatens to exploit vulnerable individuals without informed consent, such as adolescents, and can inadvertently reveal sensitive information. Therefore, while NLP provides scalable mental health screening, its real-world use calls for strict validation across diverse populations and ethical precautions for balancing efficacy and privacy. 
Several NLP-based applications are referred to in earlier research in <xref ref-type="table" rid="table2">Table 2</xref>.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Natural language processing (NLP) applications in mental health diagnosis.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Technique</td><td align="left" valign="bottom">Key findings</td><td align="left" valign="bottom">Limitations</td><td align="left" valign="bottom">Implications</td></tr></thead><tbody><tr><td align="left" valign="top">Gkotsis et al (2017) [<xref ref-type="bibr" rid="ref6">6</xref>]</td><td align="left" valign="top">LSTM<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> (deep learning)</td><td align="left" valign="top">89% accuracy in detecting depressive language</td><td align="left" valign="top">Limited to English; small sample size</td><td align="left" valign="top">Validates NLP for early depression screening</td></tr><tr><td align="left" valign="top">De Choudhury et al (2013) [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">LDA<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>+SVM<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup></td><td align="left" valign="top">Predicted depression onset (AUC<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup>=0.85)</td><td align="left" valign="top">Retrospective design; no clinical validation</td><td align="left" valign="top">Social media as a passive monitoring tool</td></tr><tr><td align="left" valign="top">Coppersmith et al (2018) [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">SVM+NLP</td><td align="left" valign="top">Detected suicide risk (AUC=0.92)</td><td align="left" valign="top">Cultural bias in training data</td><td align="left" valign="top">Scalable crisis intervention frameworks</td></tr><tr><td 
align="left" valign="top">Harrigian et al (2021) [<xref ref-type="bibr" rid="ref22">22</xref>]</td><td align="left" valign="top">BERT<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup> (Multilingual)</td><td align="left" valign="top">15% accuracy drop in non-English contexts</td><td align="left" valign="top">Small non-English sample sizes</td><td align="left" valign="top">Urges multilingual model development</td></tr><tr><td align="left" valign="top">Liu et al (2023) [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">XLM-RoBERTa<sup><xref ref-type="table-fn" rid="table2fn6">f</xref></sup></td><td align="left" valign="top">Cross-lingual <italic>F</italic><sub>1</sub>=0.82 for depression or anxiety</td><td align="left" valign="top">Limited low-resource language coverage</td><td align="left" valign="top">Framework for equitable global deployment</td></tr><tr><td align="left" valign="top">Guntuku et al (2017) [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">LIWC<sup><xref ref-type="table-fn" rid="table2fn7">g</xref></sup>+random forest</td><td align="left" valign="top">Detected anxiety via &#x201C;worry&#x201D; lexicon (<italic>F</italic><sub>1</sub>=0.78)</td><td align="left" valign="top">Self-report bias in ground-truth labels</td><td align="left" valign="top">Lexical markers as diagnostic features</td></tr><tr><td align="left" valign="top">D&#x2019;Alfonso et al (2025) [<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">Ethical analysis</td><td align="left" valign="top">Highlighted privacy risks for adolescents</td><td align="left" valign="top">Qualitative focus; no quantitative metrics</td><td align="left" valign="top">Calls for regulatory safeguards in AI<sup><xref ref-type="table-fn" rid="table2fn8">h</xref></sup> monitoring</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>LSTM: long short-term memory. 
</p></fn><fn id="table2fn2"><p><sup>b</sup>LDA: latent Dirichlet allocation. </p></fn><fn id="table2fn3"><p><sup>c</sup>SVM: support vector machine. </p></fn><fn id="table2fn4"><p><sup>d</sup>AUC: area under the curve. </p></fn><fn id="table2fn5"><p><sup>e</sup>BERT: Bidirectional Encoder Representations from Transformers. </p></fn><fn id="table2fn6"><p><sup>f</sup>XLM-RoBERTa: cross-lingual language model-robustly optimized BERT pretraining approach </p></fn><fn id="table2fn7"><p><sup>g</sup>LIWC: Linguistic Inquiry and Word Count. </p></fn><fn id="table2fn8"><p><sup>h</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap><p>NLP technology processes speech and writing and identifies linguistic markers for mental disorders. For example, depression is characterized by first-person pronoun use, negative emotion words, and lower lexical diversity [<xref ref-type="bibr" rid="ref20">20</xref>]. Suicidal thoughts have been found to correspond with overt references toward self-harm (eg, &#x201C;end it all&#x201D;) and implicit metaphors (eg, &#x201C;can&#x2019;t see a way out&#x201D;) [<xref ref-type="bibr" rid="ref21">21</xref>]. There are numerous strengths, such as enabling passive assessment of large numbers through social media [<xref ref-type="bibr" rid="ref24">24</xref>]. Moreover, NLP technology includes early detection predictive models, such as the one studied by De Choudhury et al [<xref ref-type="bibr" rid="ref20">20</xref>], who detected early depression onset 3 months prior to clinical diagnosis from Twitter data (AUC=0.85). Further, this has a low cost because the technology is free from clinical infrastructure requirements. 
Moreover, NLP technology democratizes access to mental health screening.</p></sec><sec id="s3-1-2"><title>Wearables and Biometric Sensors: Tracking Physiological and Behavioral Markers</title><p>Wearables (eg, Fitbit and Empatica) leverage AI to analyze physiological (heart rate variability [HRV] and electrodermal activity) and behavioral (sleep and mobility) data. These sensors have the capabilities of real-time monitoring of continuous data that can capture dynamic risk assessment. Jacobson et al [<xref ref-type="bibr" rid="ref7">7</xref>] predicted anxiety episodes via HRV with <italic>F</italic><sub>1</sub>=0.81 and wearables that provide quantifiable biomarkers (eg, reduced step count in depression). However, the accuracy of the sensor deteriorates with inconsistent use of the device [<xref ref-type="bibr" rid="ref11">11</xref>]. In addition, physical activity, caffeine ingestion, or environmental stress bias biometric readings. However, past work has some limitations, since most studies have been conducted on young people who are technology-literate, and the older people or the poor are not included.</p></sec><sec id="s3-1-3"><title>Neuroimaging AI: Decoding Brain Patterns in Functional Magnetic Resonance Imaging and Electroencephalogram</title><p>AI models, especially convolutional neural networks (CNNs) and graph neural networks, are used to diagnose schizophrenia, depression, and bipolar disorder by analyzing neuroimaging data. These are the models with high accuracy. Zeng et al [<xref ref-type="bibr" rid="ref26">26</xref>] classified schizophrenia with functional magnetic resonance imaging with 88% accuracy by finding prefrontal cortex dysconnectivity. The models possess an objective diagnosis capability, which decreases the reliance on subjective symptom reporting. 
However, CNNs have no interpretability, which impedes clinical adoption according to Bender et al [<xref ref-type="bibr" rid="ref27">27</xref>].</p></sec><sec id="s3-1-4"><title>Multimodal and Hybrid Diagnostic Systems</title><p>Emerging tools combine multiple data streams (ie, text+wearables+EHRs) to improve the precision of diagnosis. For instance, Tseng et al [<xref ref-type="bibr" rid="ref28">28</xref>] used NLP (Reddit posts), actigraphy (Fitbit data), and EHRs to predict depressive relapse (AUC=0.91), showing an improvement of 12% over models using one modality alone. Besides, there are multiple challenges, such as integrating disparate data sources that require interoperability standards. In addition, computational complexity is another challenge. The multimodal AI demands significant processing power. <xref ref-type="table" rid="table3">Table 3</xref> offers a comparison of the aforementioned AI-based diagnostic tools.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Comparative analysis of artificial intelligence (AI) diagnostic tools.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Tool</td><td align="left" valign="bottom">Strengths</td><td align="left" valign="bottom">Limitations</td><td align="left" valign="bottom">Best use case</td></tr></thead><tbody><tr><td align="left" valign="top">NLP<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></td><td align="left" valign="top">Low-cost, scalable, early risk detection</td><td align="left" valign="top">Cultural bias, privacy concerns</td><td align="left" valign="top">Population-level screening</td></tr><tr><td align="left" valign="top">Wearables</td><td align="left" valign="top">Real-time, objective biomarkers</td><td align="left" valign="top">Noise, adherence issues</td><td align="left" valign="top">Longitudinal monitoring of high-risk patients</td></tr><tr><td align="left" valign="top">Neuroimaging</td><td 
align="left" valign="top">High accuracy, biological insights</td><td align="left" valign="top">Costly, lacks interpretability</td><td align="left" valign="top">Second-line diagnostic validation</td></tr><tr><td align="left" valign="top">Multimodal AI</td><td align="left" valign="top">Holistic insights, high AUC<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="top">Data silos, computational complexity</td><td align="left" valign="top">Complex cases (eg, treatment-resistant depression)</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>NLP: natural language processing.</p></fn><fn id="table3fn2"><p><sup>b</sup>AUC: area under the curve. </p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s3-2"><title>Accuracy and Early Intervention</title><p>AI can be used to enhance the accuracy of diagnosis and timely intervention during the treatment of mental health, which is crucial to mitigating the increasing number of untreated diseases. According to the WHO&#x2019;s focus on preemptive treatment to cut down on disability-adjusted life years [<xref ref-type="bibr" rid="ref1">1</xref>], AI-based early interventions such as chatbots offer cognitive behavioral support during prodromal phases. In a study conducted by Torous et al [<xref ref-type="bibr" rid="ref11">11</xref>], the severity of depression was reduced by around 22% within weeks in some cases. Darcy et al [<xref ref-type="bibr" rid="ref9">9</xref>] confirmed this observation and also noted that early intervention is extremely essential in such cases. However, problems persist, including the fact that models trained on homogeneous datasets can overlook linguistic and demographic diversity [<xref ref-type="bibr" rid="ref22">22</xref>]. Excessive use of AI without clinician control may slow the delivery of evidence-based, human-led care [<xref ref-type="bibr" rid="ref5">5</xref>]. 
To realize the potential of AI, future work should be based on the collection of representative data, real-world validity, and hybrid human-AI workflows, with an effort toward automation, including clinical expertise.</p></sec><sec id="s3-3"><title>AI in Therapeutic Interventions</title><p>AI is transforming therapeutic treatments for mental health conditions through supplementing conventional treatments such as CBT and enhancing access for underprivileged groups. AI-enabled interventions like chatbots (eg, Woebot and Wysa) provide guided CBT interventions by analyzing user responses through NLP and offering real-time, evidence-based feedback [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. For instance, Woebot decreased depression symptoms (Patient Health Questionnaire-9 scores) by 22% in a randomized trial through daily tracking and cognitive reappraisal exercises for users [<xref ref-type="bibr" rid="ref9">9</xref>]. AI-enabled interventions bridge important gaps in mental health treatment. Approximately 50% of the global population does not have access to clinicians, and stigma keeps almost 60% from visiting therapists in person [<xref ref-type="bibr" rid="ref1">1</xref>]. AI-enabled interventions overcome these impediments by being available 24/7 through the use of smartphones, making discreet and low-cost interventions to marginalized groups such as rural communities and low-income communities [<xref ref-type="bibr" rid="ref11">11</xref>]. Shortcomings still persist; however, chatbots have some difficulties with the handling of complex emergencies (eg, suicidal intent) and cannot offer the human touch in understanding others, rendering therapeutic alliance vulnerable [<xref ref-type="bibr" rid="ref30">30</xref>]. 
Models that combine AI efficiencies with human oversight, for example, AI flagging high-risk cases for human assessment, have potential for optimizing scalability and safety [<xref ref-type="bibr" rid="ref31">31</xref>].</p></sec><sec id="s3-4"><title>Accessibility and Reach</title><p>AI brings mental health treatment to all levels across geographical, economic, and cultural boundaries. MindDoc and other peer support networks such as TalkLife use AI to deliver CBT mindfulness exercises that are available in 20+ languages and tailored to local idioms and social norms [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. For example, the Wysa application led to improvement in anxiety symptoms (Generalized Anxiety Disorder-7 scores) by approximately 30% for some Indian people by incorporating native metaphors for stress [<xref ref-type="bibr" rid="ref29">29</xref>]. AI also addresses staffing shortcomings: in sub-Saharan Africa, where ratios fall as low as 1 psychiatrist for every 2 million people, chatbots can provide some temporary help until clinical care is available [<xref ref-type="bibr" rid="ref1">1</xref>]. Yet, algorithmic bias remains a problem; models trained on predominantly Western data tend to misinterpret non-Western forms of distress, such as Asian somatic symptom cultures [<xref ref-type="bibr" rid="ref22">22</xref>]. To solve this, X2AI has built a multilingual chatbot that translates therapeutic conversations for Arabic and Swahili speakers (engaging users approximately 40% more than generic tools) [<xref ref-type="bibr" rid="ref25">25</xref>]. 
Despite these advancements, a significant digital divide remains; approximately 30% of low-income individuals do not own smartphones, and older populations demonstrate lower adherence to AI-enabled mental health tools [<xref ref-type="bibr" rid="ref11">11</xref>].</p></sec><sec id="s3-5"><title>Clinical Integration: Augmenting Human Expertise</title><p>AI&#x2019;s role in clinical settings is to support and not replace human decision-making. Using electronically accessible information such as EHRs, wearable information, and patient-reported outcomes, AI can identify information that is invisible to clinicians, such as early signs of depressive relapse [<xref ref-type="bibr" rid="ref28">28</xref>]. For instance, machine learning&#x2013;based models for predicting antidepressant efficacy (AUC=0.76) allow for personalized selective serotonin reuptake inhibitor prescriptions, thereby minimizing trial-and-error delays [<xref ref-type="bibr" rid="ref10">10</xref>]. Clinicians using AiCure, an AI platform that tracks medication adherence using facial recognition, reported nearly 25% higher patient compliance in schizophrenia trials [<xref ref-type="bibr" rid="ref31">31</xref>]. However, integration challenges remain; approximately 45% of therapists express distrust toward AI due to &#x201C;black box&#x201D; algorithms. Moreover, workflow disruptions often occur when AI recommendations conflict with clinical intuition [<xref ref-type="bibr" rid="ref5">5</xref>]. Regulatory frameworks such as the Food and Drug Administration&#x2019;s (FDA) SaMD (Software as a Medical Device) guidelines aim to standardize AI validation, but only about 15% of mental health applications meet evidence-based criteria [<xref ref-type="bibr" rid="ref33">33</xref>]. 
Training programs, such as the American Psychological Association&#x2019;s Digital Mental Health Certification, are upskilling clinicians to critically interpret output from AI so that they encourage collaboration, not competition [<xref ref-type="bibr" rid="ref11">11</xref>].</p></sec><sec id="s3-6"><title>AI for Supporting Informed Decisions</title><p>AI is strengthening patient agency by translating complex information into actionable knowledge. For example, MindDoc uses AI to analyze journal entries and wearable data and provide personalized psychoeducation information on what causes depression [<xref ref-type="bibr" rid="ref34">34</xref>]. Likewise, the neural network prediction model developed by Chekroud et al [<xref ref-type="bibr" rid="ref10">10</xref>] allows patients to see antidepressant efficacy rates in comparison, which encourages collaborative decision-making between clinicians and their clients. Overreliance upon AI, however, may contribute to automation bias, wherein patients blindly follow algorithmic recommendations. In a trial for Woebot, 25% of clients deferred important choices to the chatbot, disregarding disclaimers about its scope [<xref ref-type="bibr" rid="ref9">9</xref>]. To mitigate these concerns, tools such as IBM&#x2019;s AI Explainability 360 provide accessible, layman-friendly explanations for AI-driven recommendations. For example, a system might clarify that a user&#x2019;s sleep score decreased due to 3 specific events occurring after midnight during the past week, effectively bridging the gap between technical output and patient understanding [<xref ref-type="bibr" rid="ref35">35</xref>].</p></sec><sec id="s3-7"><title>Transparency of AI&#x2019;s Role</title><p>Transparency is key to ethical AI deployment. Patients have to be informed about when AI is affecting their care, what types of data are being used (eg, social media and EHRs), and how errors are being handled. 
The guidelines issued by the FDA on SaMD require mental health applications to report accuracy rates and failure modes. For example, Wysa openly acknowledges that its NLP model misclassifies sarcasm as suicidal ideation in 12% of cases [<xref ref-type="bibr" rid="ref29">29</xref>]. Clinicians who use AI tools such as Glimmer (a suicide risk predictor) are taught to put algorithmic risk scores in the context of qualitative patient narratives so that transparency does not undercut therapeutic trust [<xref ref-type="bibr" rid="ref13">13</xref>]. However, black box models are dominant in the field of mental health AI, where a review showed that only about 15% of studies that used deep learning provided model interpretability metrics [<xref ref-type="bibr" rid="ref36">36</xref>]. Emerging frameworks such as Local Interpretable Model-Agnostic Explanations (LIME) [<xref ref-type="bibr" rid="ref37">37</xref>] and the European Union&#x2019;s GDPR &#x201C;right to explanation&#x201D; are pushing the field toward auditable AI, albeit with mixed compliance with the requirements in nonclinical applications [<xref ref-type="bibr" rid="ref14">14</xref>].</p></sec><sec id="s3-8"><title>Comparative Synthesis of AI Modalities in Mental Health Diagnosis</title><p><xref ref-type="table" rid="table4">Table 4</xref> presents a comparative synthesis of studies on various AI modalities used in mental health diagnosis. The table summarizes key aspects such as AI modality, study design, diagnostic accuracy, and population. In addition, the table incorporates pooled diagnostic accuracy ranges. 
This synthesis allows for a clearer understanding of the relative strength of evidence across the included studies.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Comparative synthesis of artificial intelligence (AI) modalities in mental health diagnosis.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">AI modality</td><td align="left" valign="bottom">Study design</td><td align="left" valign="bottom">Outcome (diagnostic accuracy)</td><td align="left" valign="bottom">Population</td><td align="left" valign="bottom">Pooled diagnostic accuracy range</td><td align="left" valign="bottom">Key trends</td></tr></thead><tbody><tr><td align="left" valign="top">Gkotsis et al (2017) [<xref ref-type="bibr" rid="ref6">6</xref>]</td><td align="left" valign="top">LSTM<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup> (deep learning)</td><td align="left" valign="top">RCT<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">89% accuracy for depressive language detection</td><td align="left" valign="top">English-speaking, social media users</td><td align="left" valign="top">89% accuracy</td><td align="left" valign="top">Scalable early depression screening</td></tr><tr><td align="left" valign="top">De Choudhury et al (2013) [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="top">LDA<sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup>+SVM<sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup></td><td align="left" valign="top">Retrospective cohort</td><td align="left" valign="top">AUC<sup><xref ref-type="table-fn" rid="table4fn5">e</xref></sup>=0.85 for depression onset prediction</td><td align="left" valign="top">Twitter users (adults)</td><td align="left" valign="top">81% to 89% accuracy</td><td align="left" valign="top">Passive social media monitoring for depression 
onset</td></tr><tr><td align="left" valign="top">Coppersmith et al (2018) [<xref ref-type="bibr" rid="ref21">21</xref>]</td><td align="left" valign="top">SVM+NLP<sup><xref ref-type="table-fn" rid="table4fn6">f</xref></sup></td><td align="left" valign="top">Cross-sectional</td><td align="left" valign="top">AUC=0.92 for suicide risk detection</td><td align="left" valign="top">Twitter users (general)</td><td align="left" valign="top">81% to 89% accuracy</td><td align="left" valign="top">Suicide risk detection, cultural bias in training data</td></tr><tr><td align="left" valign="top">Liu et al (2023) [<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">XLM-RoBERTa<sup><xref ref-type="table-fn" rid="table4fn7">g</xref></sup></td><td align="left" valign="top">Cross-sectional</td><td align="left" valign="top"><italic>F</italic><sub>1</sub>=0.82 for depression or anxiety detection</td><td align="left" valign="top">Multilingual populations</td><td align="left" valign="top">81% to 89% accuracy</td><td align="left" valign="top">Framework for global deployment with multilingual data coverage</td></tr><tr><td align="left" valign="top">Guntuku et al (2017) [<xref ref-type="bibr" rid="ref24">24</xref>]</td><td align="left" valign="top">LIWC<sup><xref ref-type="table-fn" rid="table4fn8">h</xref></sup>+random forest</td><td align="left" valign="top">Retrospective Cohort</td><td align="left" valign="top"><italic>F</italic><sub>1</sub>=0.78 for anxiety detection</td><td align="left" valign="top">Reddit and Facebook users</td><td align="left" valign="top">70% to 81% <italic>F</italic><sub>1</sub>-score</td><td align="left" valign="top">Worry and rumination patterns as diagnostic markers</td></tr><tr><td align="left" valign="top">Tseng et al (2023) [<xref ref-type="bibr" rid="ref28">28</xref>]</td><td align="left" valign="top">NLP+actigraphy</td><td align="left" valign="top">Cohort+ EHRs<sup><xref ref-type="table-fn" rid="table4fn9">i</xref></sup></td><td 
align="left" valign="top">AUC=0.91 for depressive relapse prediction</td><td align="left" valign="top">Adults, depression patients</td><td align="left" valign="top">85% to 91% AUC</td><td align="left" valign="top">Multimodal AI outperforms single-modality models for relapse prediction</td></tr><tr><td align="left" valign="top">Jacobson et al (2020) [<xref ref-type="bibr" rid="ref7">7</xref>]</td><td align="left" valign="top">Wearables (HRV<sup><xref ref-type="table-fn" rid="table4fn10">j</xref></sup>)</td><td align="left" valign="top">Cohort</td><td align="left" valign="top"><italic>F</italic><sub>1</sub>=0.81 for anxiety episode prediction</td><td align="left" valign="top">Young, technology-literate individuals</td><td align="left" valign="top">70% to 81% <italic>F</italic><sub>1</sub>-score</td><td align="left" valign="top">Real-time anxiety episode prediction with wearables</td></tr><tr><td align="left" valign="top">Zeng et al (2018) [<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">CNN<sup><xref ref-type="table-fn" rid="table4fn11">k</xref></sup> (neuroimaging)</td><td align="left" valign="top">Cross-sectional</td><td align="left" valign="top">88% accuracy for schizophrenia diagnosis</td><td align="left" valign="top">Patients with schizophrenia</td><td align="left" valign="top">88% accuracy</td><td align="left" valign="top">High accuracy in detecting schizophrenia through neuroimaging</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>LSTM: long short-term memory. </p></fn><fn id="table4fn2"><p><sup>b</sup>RCT: randomized controlled trial. </p></fn><fn id="table4fn3"><p><sup>c</sup>LDA: latent Dirichlet allocation. </p></fn><fn id="table4fn4"><p><sup>d</sup>SVM: support vector machine. </p></fn><fn id="table4fn5"><p><sup>e</sup>AUC: area under the curve. 
</p></fn><fn id="table4fn6"><p><sup>f</sup>NLP: natural language processing.</p></fn><fn id="table4fn7"><p><sup>g</sup>XLM-RoBERTa: cross-lingual language model-robustly optimized BERT pretraining approach. </p></fn><fn id="table4fn8"><p><sup>h</sup>LIWC: Linguistic Inquiry and Word Count. </p></fn><fn id="table4fn9"><p><sup>i</sup>EHR: electronic health record.</p></fn><fn id="table4fn10"><p><sup>j</sup>HRV: heart rate variability. </p></fn><fn id="table4fn11"><p><sup>k</sup>CNN: convolutional neural network. </p></fn></table-wrap-foot></table-wrap><p>The past studies show a strong trend in AI&#x2019;s diagnostic capabilities across different modalities. NLP-based models show diagnostic accuracy ranging from around 81% to 89% for depression detection. Wearable sensors (eg, HRV) offer <italic>F</italic><sub>1</sub>-scores nearly from 0.70 to 0.81 for anxiety detection. In addition to this, the multimodal AI (combining text, wearables, and EHR data) offers the highest performance, with AUC ranging from 0.85 to 0.91, especially in predicting depression relapse and suicide risk.</p></sec><sec id="s3-9"><title>Legal and Regulatory Challenges in AI and Health Data Processing</title><sec id="s3-9-1"><title>Data Privacy and Security-Regulatory Challenges</title><p>The use of AI within mental health treatment has raised significant concerns about the confidentiality of sensitive psychiatric information, which, if inadequately protected, can stigmatize patients or compromise their professional lives [<xref ref-type="bibr" rid="ref25">25</xref>]. Mental health information, such as therapy transcripts, wearables&#x2019; biometric readings, and social media use, is particularly vulnerable because of its intrinsic nature and sheer amount needed for training AI models [<xref ref-type="bibr" rid="ref22">22</xref>]. 
For instance, NLP systems analyzing Reddit posts for depression risk may increase reidentification risks when metadata such as timestamp and style of writing are preserved [<xref ref-type="bibr" rid="ref24">24</xref>].</p><p>AI-driven therapeutic outreach requires rigorous, documented data protections to avoid legal and ethical liability. As seen in the BetterHelp case, the sale of user data to advertisers highlights how noncompliant data management leads to privacy breaches, though such vulnerabilities may arise from regulatory gaps rather than direct ethical misconduct [<xref ref-type="bibr" rid="ref25">25</xref>]. Biases perpetuate inequality in algorithmic decision-making; for instance, the NLP models misdiagnose pain from Black patients as &#x201C;drug-seeking&#x201D; approximately 35% more often than those from White patients [<xref ref-type="bibr" rid="ref36">36</xref>]. Excessive reliance on AI systems without adequate clinical oversight could undermine clinical abilities as well. This is reflected in findings that a subset of psychiatry residents reported reduced diagnostic confidence when AI tools were used [<xref ref-type="bibr" rid="ref5">5</xref>]. Fixes such as federated learning (training across decentralized data for maintaining privacy) and bias analysis through means like IBM&#x2019;s AI Fairness 360 [<xref ref-type="bibr" rid="ref38">38</xref>] are increasingly proposed as mitigation strategies.</p><p>To mitigate risks, the GDPR and HIPAA establish legal requirements for anonymizing data access control and patient consent [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Nonetheless, there is still imbalanced technical compliance. Rocher et al [<xref ref-type="bibr" rid="ref40">40</xref>] revealed that a significant share of anonymized health information can be reidentified with the help of auxiliary data like the ZIP codes. 
Emerging techniques such as federated learning, where AI models learn from decentralized data without centralizing raw data, reduce exposure [<xref ref-type="bibr" rid="ref38">38</xref>], whereas differential privacy adds statistical noise to the datasets, preventing reidentification [<xref ref-type="bibr" rid="ref41">41</xref>]. A research study showed that 45% of mental health applications have non-HIPAA&#x2013;compliant encryption, and 60% of applications share patient information with third-party advertisers [<xref ref-type="bibr" rid="ref33">33</xref>]. The WHO advocates for &#x201C;privacy by design&#x201D; frameworks, which require AI developers to apply such measures as end-to-end encryption and audit trails at the time of development [<xref ref-type="bibr" rid="ref42">42</xref>]. For instance, Woebot limits data retention to 30 days and anonymizes user responses, although its cloud-based deployment still carries residual security risks [<xref ref-type="bibr" rid="ref9">9</xref>]. Ultimately, balancing AI&#x2019;s clinical potential with ethical data stewardship requires proportionate governance alongside patient education and cross-sector coordination. Moreover, categorical restrictions should be avoided to reduce exploitation risks while preserving innovation.</p></sec><sec id="s3-9-2"><title>Patient Autonomy and Ethical Concerns</title><p>The use of AI in mental health may raise ethical concerns regarding patient autonomy and informed consent. In addition, the GDPR emphasizes transparency and consent mechanisms to ensure that patients retain control over their data and decisions. 
Although AI-enabled interventions like NLP-based therapy applications (eg, Woebot and Wysa) give the patients real-time mood analysis and tailored CBT exercises, a few studies hinted at the possibility of AI threatening patient autonomy if users are unable to challenge or comprehend algorithmic suggestions [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. For example, AI-enabled interventions for scrutinizing wearables for early patterns of anxiety [<xref ref-type="bibr" rid="ref7">7</xref>] may, in some cases, inadvertently disempower patient users in the interest of machine-driven outputs classically described as &#x201C;algorithmic paternalism&#x201D; [<xref ref-type="bibr" rid="ref5">5</xref>]. To counter this, principles for informed consent require further refinement. Patients need not only know how their data are being used but also understand that AI models may contain limitations including biases arising from their learning datasets (eg, underrepresentation of non-Western populations as claimed by Harrigian et al [<xref ref-type="bibr" rid="ref22">22</xref>]). Patient experience has already demonstrated that when given understandable outputs from AI (eg, graphical explanations for how sleeping patterns relate to shifts in mood), patient buy-in increases up to 30% for treatment planning [<xref ref-type="bibr" rid="ref37">37</xref>]. Yet, approximately 40% of users from low-literacy populations cannot meaningfully decode expert explanations, widening health inequity [<xref ref-type="bibr" rid="ref43">43</xref>]. 
Models that combine AI functionality for clinician-augmented interpretation, such as AiCure&#x2019;s clinician dashboard [<xref ref-type="bibr" rid="ref31">31</xref>], provide an example of where transparency and autonomy can occur together and not at the expense of efficacy.</p></sec><sec id="s3-9-3"><title>Ethical and Diagnostic Concerns in AI-Enabled Mental Health Systems</title><p>AI holds promise, but it also brings risks such as opaque decision-making and algorithmic bias. Legal frameworks like GDPR and WHO guidelines recommend regular bias audits and transparency in AI decision-making processes to mitigate these risks [<xref ref-type="bibr" rid="ref44">44</xref>]. For example, NLP models trained on predominantly White, English-speaking populations have been shown to misdiagnose Black patients&#x2019; linguistic expressions of distress as &#x201C;low risk&#x201D; up to 35% more often than White patients [<xref ref-type="bibr" rid="ref36">36</xref>]. Similarly, wearable algorithms may conflate physical activity with manic episodes in bipolar disorder, potentially leading to inappropriate alerts [<xref ref-type="bibr" rid="ref45">45</xref>]. These errors are compounded with digital health literacy gaps: it has been found that about 60% of older patients are not able to distinguish AI recommendations from human advice and may as such be more susceptible to manipulation [<xref ref-type="bibr" rid="ref43">43</xref>]. Regulatory solutions, such as WHO&#x2019;s ethical guidelines for AI in health [<xref ref-type="bibr" rid="ref42">42</xref>], call for mandatory audits of bias and patient-led oversight committees. 
Meanwhile, participatory design methods, such as those of Torous et al [<xref ref-type="bibr" rid="ref11">11</xref>], that involve patients in AI tool development are showing success in connecting technologies to user needs.</p></sec><sec id="s3-9-4"><title>Accountability and Transparency in AI Systems</title><p>Accountability for AI-powered mental health treatment is essential in order to counteract mistakes that may cause harm to patients, including misdiagnosis or inappropriately suggesting treatment. Regulatory frameworks such as the European Union&#x2019;s GDPR and the FDA&#x2019;s SaMD guidelines require shared responsibility among developers, clinicians, and organizations for AI-related outcomes [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. For example, the public became distrustful of Babylon Health&#x2019;s chatbot after it erroneously brushed off a user&#x2019;s chest pain as anxiety, a failure compounded by the absence of well-defined accountability policies [<xref ref-type="bibr" rid="ref5">5</xref>]. Models of accountability such as the human-in-the-loop help clinicians to check AI output before it is put to use. A study published in 2022 documented that clinics applying AiCure, an AI for medication compliance, decreased errors up to 40% when cross-checked by clinicians in response to algorithmic signals [<xref ref-type="bibr" rid="ref31">31</xref>]. However, gaps remain; only approximately 30% of mental health applications disclose liability terms, leaving patients vulnerable [<xref ref-type="bibr" rid="ref11">11</xref>].</p><p>Beyond these regulatory requirements, a problem that is yet to be addressed adequately is what should be done about liability when harm results from AI-assisted care. Should liability be on the developers that design and train the AI systems, the physicians who use them, or the health care institutions? Lack of clear rules of liability makes the patients skeptical. 
Addressing this gap will require interaction between legislators, regulators, and professional bodies to ascertain transparent liability rules [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref46">46</xref>].</p></sec><sec id="s3-9-5"><title>Explainable Artificial Intelligence for Trust and Adoption</title><p>The &#x201C;black box&#x201D; of AI models dilutes trust in mental health applications. Explainable artificial intelligence (XAI) frameworks such as LIME and Shapley Additive Explanations (SHAP) assist in explaining the decision-making process of AI by giving importance to important predictors [<xref ref-type="bibr" rid="ref37">37</xref>]. For example, IBM&#x2019;s AI Explainability 360 renders the depression risk scores into patient-readable insights (eg, &#x201C;Your sleeping patterns accounted for 60% for this prediction&#x201D;), while studies have shown that user trust increased up to 35% [<xref ref-type="bibr" rid="ref35">35</xref>]. By comparison, conventional models such as CNNs applied in functional magnetic resonance imaging&#x2013;based schizophrenia diagnosis are accurate to approximately 88% but offer limited interpretability, which may deter clinician uptake [<xref ref-type="bibr" rid="ref26">26</xref>]. Decision trees learned from wearables for anxiety detection provide approximately 81% accuracy along with perfect transparency, although they struggle in complex scenarios [<xref ref-type="bibr" rid="ref7">7</xref>]. Accordingly, the accuracy-interpretability trade-off remains an open methodological challenge rather than a resolved limitation. 
Besides, Arrieta et al [<xref ref-type="bibr" rid="ref47">47</xref>] state that XAI has to balance technical sophistication and clinical usability and adapt explanations for patient literacy levels.</p></sec><sec id="s3-9-6"><title>Ongoing Audits and Bias Mitigation</title><p>Ongoing evaluation is required to keep AI programs fair and effective over time after deployment. Obermeyer et al [<xref ref-type="bibr" rid="ref36">36</xref>] discovered that an algorithm implemented in some US hospitals systematically underestimated the mental health needs of Black individuals due to embedded bias in its training data, expanding health disparities. Regular audits done through mechanisms like IBM&#x2019;s AI Fairness 360 or Google&#x2019;s What-If Tool can help to identify such biases [<xref ref-type="bibr" rid="ref48">48</xref>]. For example, an audit for Woebot identified its NLP algorithm misattributing African American Vernacular English words for distress as low risk, and the algorithm underwent retraining using diverse datasets [<xref ref-type="bibr" rid="ref22">22</xref>]. Federated learning platforms such as those used in Sheller et al [<xref ref-type="bibr" rid="ref38">38</xref>] enable multi-institution bias checks without sharing information, while secrecy is ensured. However, compliance remains incomplete; only approximately 20% of applications for mental health are audited every year, and less than 10% publish results [<xref ref-type="bibr" rid="ref33">33</xref>]. The WHO&#x2019;s ethical framework advocates for mandatory third-party audits and patient representation in oversight structures to provide accountability [<xref ref-type="bibr" rid="ref13">13</xref>].</p></sec><sec id="s3-9-7"><title>Integration of Legal and Empirical Findings</title><p>This section links the legal and regulatory framework (especially the GDPR) with the empirical results of the studies reviewed. 
The examination of these legal principles in practice helps to understand better the ethical and regulatory issues of using AI for mental health.</p><sec id="s3-9-7-1"><title>Data Privacy and Security</title><p>GDPR Article 5 encourages data minimization, which means that only data that are necessary should be processed. In this regard, research like Guntuku et al [<xref ref-type="bibr" rid="ref24">24</xref>] presented the ethical concerns of AI models where the collection of data at times far exceeds what is required for mental health assessments. However, it is not clear from these studies whether the overabundance of data gathering reflects deliberate noncompliance or a lack of awareness of data minimization requirements (under GDPR). For example, in the case of interventions with the help of AI (wearables and monitoring tools on social media are examples), there is no explicit reference to whether these studies followed proper procedures to ensure that only relevant data were processed.</p><p>This brings up interesting questions of whether the use of AI applications in mental health research adheres actively to the concept of data minimization within GDPR, or if applications are used in the gray area where a large amount of data is collected but not necessarily anonymized and minimized. Therefore, future research has to be specific with the compliance mechanisms in place to avoid exploitation of sensitive information and make sure that AI systems only process information needed for their intended purpose.</p></sec><sec id="s3-9-7-2"><title>Transparency and Accountability</title><p>GDPR Article 5 also requires transparency in the processing of personal data. Liu et al [<xref ref-type="bibr" rid="ref23">23</xref>] examined the ethical implications of privacy in AI applications. The study noted that insufficient transparency regarding how AI models process personal data may reduce user trust. 
The study found that AI-enabled interventions in mental health care must disclose how they collect and use data to comply with GDPR. These disclosures are essential for ensuring that patients and users can trust AI systems with their sensitive mental health data.</p><p>The studies in many cases provide broad explanations or general statements about data protection measures without providing specifics regarding how the transparency requirements of the GDPR are being satisfied. In addition, it is unclear if the studies clearly documented their efforts to be compliant with transparency regulations. Thus, while the transparency issue is often discussed, in practice, there is not always visible comprehensiveness in regard to GDPR and legal standards. To address this, future studies should have comprehensive disclosures on how they handle data, and they should align themselves with the legal obligations under GDPR to ensure that the data rights of patients are fully respected.</p></sec><sec id="s3-9-7-3"><title>Explicit Consent</title><p>GDPR Article 9 requires obtaining explicit consent in order to process personal data that are considered sensitive, including health data. Rocher et al [<xref ref-type="bibr" rid="ref40">40</xref>] stated that many AI systems did not put proper consent mechanisms in place; thus, it is important to analyze them. Many studies have mentioned consent in passing, without specifying the method, if any, of the consent process, or if users were well-informed about the process of data collection. Given the lack of proper documentation on the informed consent process in these studies, there is concern about the extent to which these studies adhered to legal and ethical requirements outlined in the GDPR. It is also critical to discuss whether these studies were fully compliant with the explicit consent requirements of GDPR or if they simply bypassed or minimized the importance of properly placing consent procedures. 
There is a need for future research to make sure explicit description of consent protocols is provided, demonstrating clear adherence to GDPR and other ethical guidelines.</p></sec><sec id="s3-9-7-4"><title>Purpose Limitation</title><p>GDPR Article 5 focuses on how personal data can only be used for the defined purpose. However, according to De Choudhury et al [<xref ref-type="bibr" rid="ref20">20</xref>], some AI models, in order to predict depression, reused data from social media outside the scope of the intended purpose, which is a concern of misuse of data. While the studies particularly point out such ethical issues, the studies may not have actually been compliant with the purpose limitation of GDPR or may not have discussed these without following the regulation.</p><p>The problem of purpose creep, whereby the data are used for other than the original purpose, needs more analysis. We must explore whether these AI systems in these studies were actively in line with GDPR&#x2019;s restrictions on how they could use data or if the studies could operate adjacent to these restrictions, raising significant concerns of data exploitation. To ensure compliance, the AI systems will need to be designed such that strict limitations in how a purpose will be used are included, and future research should show that adherence to the principle is evidenced through clear documentation.</p></sec></sec></sec><sec id="s3-10"><title>Legal and Regulatory Constraints Under GDPR</title><sec id="s3-10-1"><title>Overview</title><p>In Europe, the GDPR, which is arguably the most sophisticated data privacy regulation in the world, places significant legal restrictions on how AI systems can process mental health data. 
This entails challenges to the principles of design for the operation of data-centric AI frameworks.</p><p>Pursuant to Article 9 of the GDPR, &#x201C;special categories of personal data,&#x201D; which includes mental health status, biometric, and genetic data, may not be processed unless one of the explicit legal bases exemplified by explicit consent, necessity for vital interest, or scientific research under adequate safeguards applies (GDPR, 2018, Article 9.2). For developers of AI systems and particularly those concerned with mental health, the limits of the law represent a unique challenge when one considers the exhaustiveness and comprehensiveness of data often collected from digital therapy bots, wearables, and social media that is assumed to be collected without informed consent and without substantive transparency mechanisms. Moreover, even without personally identifiable information, the data could be subject to regulatory carving out if it is legally identifiable by relevant ancillary information, as argued by Rocher et al [<xref ref-type="bibr" rid="ref40">40</xref>].</p><p>In conjunction with Article 9, Article 5 provides a set of guidelines for lawful data processing, which are often contravened by AI systems integrated in mental health technologies. These are as follows:</p><list list-type="bullet"><list-item><p>Legitimacy, propriety, and openness (Article 5.1.a): Patients should be informed of how AI systems use algorithms to manage their data, and more significantly, patients must be provided with genuine explanations. Unfortunately, most mental health care applications only provide vague or no explanations at all [<xref ref-type="bibr" rid="ref33">33</xref>].</p></list-item><list-item><p>Boundaries of purpose (Article 5.1.b): Data obtained should be confined only to the specified boundaries set by the collecting entity. 
There are numerous examples such as the BetterHelp case, where user data were repurposed for marketing [<xref ref-type="bibr" rid="ref49">49</xref>].</p></list-item><list-item><p>Data minimization (Article 5.1.c): AI-driven models may raise concerns about data minimization if they collect more data than required. However, GDPR allows data processing within the confines of explicit consent that basically ensures that only relevant data are collected and processed.</p></list-item><list-item><p>Accuracy (Article 5.1.d) and storage limitation (Article 5.1.e): AI-performing systems should work within the scope of current and valid information. Data should be kept for only the necessary amount of time. However, there are numerous studies operating on outdated and Western datasets with no clear end point to data retention, which increases the potential for errors and abuse of information [<xref ref-type="bibr" rid="ref22">22</xref>].</p></list-item></list><p>The debate above highlights a larger conflict between people&#x2019;s right to data autonomy and dignity and AI&#x2019;s voracious demand for huge data. They impose ethical and legal restrictions that require AI systems used in health care to be purposefully created with compliance, openness, and restraint in mind.</p><p>The European Union Artificial Intelligence Act, which came into effect in 2024, expands on existing frameworks by introducing additional legal requirements unique to AI. Because mental health AI applications have the potential to affect psychological well-being, safety, and fundamental rights, they are likely to be classified as &#x201C;high risk&#x201D; under the act.</p></sec><sec id="s3-10-2"><title>Purpose Limitation and Commercial Exploitation</title><p>The principal limitations of purpose set forth by the GDPR restrain companies from exploiting user data without obtaining new consent, which is often ignored by companies repurposing monetizing user data. 
BetterHelp, a teletherapy service, for instance, is under fire for disclosing sensitive user information to advertisers like Facebook and Pinterest even though it marketed itself as maintaining users&#x2019; confidentiality [<xref ref-type="bibr" rid="ref25">25</xref>]. These incidents exemplify the unchecked structural risks that clinicians and health care AI developers face when partnering with third-party advertising and cloud hosting services that lack GDPR-compliant frameworks.</p></sec><sec id="s3-10-3"><title>Cross-Border Fragmentation and Equity Issues</title><p>The above cases illustrate how the extraterritorial nature of the GDPR inhibits the global deployment of AI technology. After the annulment of the Privacy Shield agreement in 2020 (Schrems II), the use of Standard Contractual Clauses or Binding Corporate Rules for Montreal-based AI companies significantly hampers research collaboration and expeditions with European Union mental health data. Gaps left by HIPAA&#x2019;s limited focus on health care providers enable consumer-facing applications to exploit users. The permissive enforcement policies in low- and middle-income countries serve to capture exploitable data without legal consequences, redirecting data-capturing streams.</p></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Diagnostic Innovation</title><p>AI technologies, for instance, NLP and wearables integrated with biometric sensors, have demonstrated remarkable promise in improving the early detection of mental health conditions. However, a critical appraisal of the evidence reveals a significant imbalance in study types. 
The NLP-based models trained on social media data have shown approximately 89% accuracy in detecting depression and an AUC of 0.92 for suicide risk detection as identified by Gkotsis et al [<xref ref-type="bibr" rid="ref6">6</xref>] and Coppersmith et al [<xref ref-type="bibr" rid="ref21">21</xref>]. While these results are descriptively rich, the strength of this evidence is tempered by its retrospective nature and the disproportionate representation of NLP studies in current literature, which often lack the prospective validation required for clinical diagnostic standards.</p><p>The wearable devices like Empatica E4 have shown <italic>F</italic><sub>1</sub>-score around 0.81 in predicting anxiety episodes in some cases [<xref ref-type="bibr" rid="ref7">7</xref>]. These technologies are particularly beneficial in relation to scalability as well as cost-effectiveness. Further, they offer real-time tracking that has the potential to improve access to mental health care in resource-limited settings significantly. In addition, the advances in neuroimaging AI, including CNNs in schizophrenia diagnosis, have further reduced the importance of symptoms as reported by the patients [<xref ref-type="bibr" rid="ref26">26</xref>].</p><p>Further, AI-enabled diagnostic tools consistently show high accuracy; nevertheless, when weighing these conclusions, it is vital to note that &#x201C;high accuracy&#x201D; in a controlled dataset does not always translate to real-world efficacy. However, there are still challenges such as the cultural bias in the NLP models. For instance, models trained on Western data often predict poorly for any non-English data as explained by Harrigian et al [<xref ref-type="bibr" rid="ref22">22</xref>]. Additionally, wearables have problems such as sensor noise and adherence, which are major challenges in terms of real-world applications [<xref ref-type="bibr" rid="ref11">11</xref>]. 
Therefore, while the volume of diagnostic evidence is high, the &#x201C;certainty&#x201D; of its impact in diverse clinical settings remains moderate due to these generalizability gaps. Furthermore, multimodal AI systems that combine EHRs with wearable data hold promise but require interoperability standards to function seamlessly, as highlighted by Tseng et al [<xref ref-type="bibr" rid="ref28">28</xref>].</p></sec><sec id="s4-2"><title>Therapeutic Augmentation</title><p>AI-driven interventions such as Woebot and MindDoc provide access to personalized feedback and self-management tools, which are available 24/7. In contrast to the high volume but often retrospective NLP research, therapeutic AI research, though smaller in number of studies, often uses rigorous methods such as randomized controlled trials. These tools have demonstrated major promise with Woebot, showing an approximate 30% reduction in anxiety symptoms in clinical trials as claimed by Darcy et al [<xref ref-type="bibr" rid="ref9">9</xref>] and Brancati et al [<xref ref-type="bibr" rid="ref34">34</xref>].</p><p>This stratification of evidence suggests that although we have more data on AI&#x2019;s potential for diagnosis (NLP), the evidence of AI&#x2019;s potential for therapeutic effectiveness (chatbots) is often backed by higher-quality, prospective evidence. These interventions give the power to patients and are evolving personalized and scalable treatment beyond the clinic hours. AI chatbots like Woebot have proven to be effective when it comes to personalizing care and offering more engagement to the patients, especially those who live in urban and rural spaces. However, one important limitation is the lack of empathy displayed by such AI systems (something required in cases of high risk, such as suicidal thoughts or episodes of severe depression). 
Additionally, there can be hazards of algorithmic reliance, as about 25% of the patients reported not being able to trust AI for decision-making [<xref ref-type="bibr" rid="ref9">9</xref>].</p><p>Arya et al [<xref ref-type="bibr" rid="ref35">35</xref>] and WHO [<xref ref-type="bibr" rid="ref42">42</xref>] studied XAI methods such as LIME and SHAP to address this issue by providing an explanation for AI results. However, it has been reported that 40% of low-literacy clients find explanations using technical descriptions difficult [<xref ref-type="bibr" rid="ref43">43</xref>]. Ultimately, the relative strength of evidence for therapeutic augmentation is high in terms of methodology but limited by smaller sample sizes compared to the massive datasets that are used in NLP-based screening.</p></sec><sec id="s4-3"><title>Patient Autonomy</title><p>AI tools have the potential to enhance patient autonomy greatly by giving real-time feedback, psychoeducation, and self-management tools such as MindDoc and Woebot. These interventions are essential for reducing stigma and increasing engagement by providing private access support. However, as the AI involvement in patient decision-making increases, questions about the overreliance and algorithmic paternalism also emerge. AI has proven useful in empowering patients by providing them with tools for self-management of their own mental health. However, there is a growing dependence on algorithms, and nearly 25% of patients trust the decisions that AI makes over those that clinicians make [<xref ref-type="bibr" rid="ref9">9</xref>]. The digital literacy gap is still a big challenge. Many patients, especially those who are from lower socioeconomic strata or from the aged population, may have difficulty properly engaging with AI-integrated tools. Additionally, AI-enabled interventions without human empathy may not be as effective in building actual patient engagement. 
There are dangers of biases in AI, as models misdiagnose marginalized groups, for example, by misclassifying Black patients&#x2019; linguistic expressions of distress [<xref ref-type="bibr" rid="ref36">36</xref>]. In addition, Rocher et al [<xref ref-type="bibr" rid="ref40">40</xref>] warn that sensitive data, for example, social media and biometric inputs, are at risk of reidentification, raising privacy concerns.</p></sec><sec id="s4-4"><title>Legal Accountability</title><p>As AI continues to be integrated into mental health care, the issue of legal accountability is a key one. Frameworks such as GDPR and HIPAA provide key guidelines for patient privacy. However, there are concerns regarding compliance and accountability, where around 45% of mental health applications do not have HIPAA-compliant encryption [<xref ref-type="bibr" rid="ref33">33</xref>]. There is a resulting growing consensus about the need for strong regulatory frameworks for ensuring that AI systems comply with privacy standards. However, the lack of clear definitions about liability in case of mistakes or misdiagnosis by using AI is still a major concern. AI models are often described as &#x201C;black boxes&#x201D;; thus, it is difficult to assign liability for errors. Moreover, the absence of bias audits and XAI tools in many systems raises questions about algorithmic fairness and transparency [<xref ref-type="bibr" rid="ref48">48</xref>].</p><p>AI&#x2019;s uses also require clear guidelines in the ethical standards aspect as a result of the problems regarding openness, reduction of biases, and involvement of multidisciplinary teams. The problem of a lack of accountability frameworks remains [<xref ref-type="bibr" rid="ref5">5</xref>], but approaches such as federated learning [<xref ref-type="bibr" rid="ref38">38</xref>] and differential privacy [<xref ref-type="bibr" rid="ref41">41</xref>] promise ways of solving the data privacy and fairness problem in algorithms. 
The ethical framework for these systems should include mandatory testing for bias, transparency requirements on XAI, as well as compliance with privacy regulations on the order of GDPR and HIPAA.</p><p>Studies such as Jobin et al [<xref ref-type="bibr" rid="ref14">14</xref>] and Nwosu et al [<xref ref-type="bibr" rid="ref31">31</xref>] were concerned about the centrality of accountability for clinical AI applications. A few other studies had some measure to improve medication adherence with the use of AI tools such as AiCure [<xref ref-type="bibr" rid="ref31">31</xref>], despite the general skepticism from therapists about &#x201C;black-box&#x201D; AI models [<xref ref-type="bibr" rid="ref5">5</xref>]. A study of Kandeel et al [<xref ref-type="bibr" rid="ref50">50</xref>] highlights the importance of interdisciplinary collaboration between clinicians, AI developers, and ethicists to ensure that these systems uphold patient autonomy, equity, and ethical standards. In the larger context of the regulatory frameworks, Afify et al [<xref ref-type="bibr" rid="ref3">3</xref>] highlighted the role of environmental law, underlining the importance of the legal provisions in managing new emerging challenges in technology. The study was focused on AI in mental health. The study highlights the importance of adequate legislation to reduce the threat of new environmental change, which is similar to providing accountability and fairness in the application of AI in health care. Similarly, Mohamed [<xref ref-type="bibr" rid="ref46">46</xref>] outlined the legal consequences and accountability of fully AI-based surgeries. 
The summation of the paper under review about the application of AI in mental health care is further represented in <xref ref-type="table" rid="table5">Table 5</xref>.</p><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Summary of artificial intelligence (AI) in mental health care.</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Domain</td><td align="left" valign="bottom">Key applications</td><td align="left" valign="bottom">Benefits</td><td align="left" valign="bottom">Challenges</td><td align="left" valign="bottom">Ethical concerns</td></tr></thead><tbody><tr><td align="left" valign="top">Diagnosis</td><td align="left" valign="top">NLP<sup><xref ref-type="table-fn" rid="table5fn1">a</xref></sup> (social media or text analysis), wearables (biometrics), neuroimaging (fMRI<sup><xref ref-type="table-fn" rid="table5fn2">b</xref></sup> or EEG<sup><xref ref-type="table-fn" rid="table5fn3">c</xref></sup>)</td><td align="left" valign="top">Early detection (eg, 89% accuracy for depression), scalability, low cost</td><td align="left" valign="top">Cultural bias (eg, 15% accuracy drop in non-English data), sensor noise</td><td align="left" valign="top">Privacy risks (eg, reidentification), lack of informed consent</td></tr><tr><td align="left" valign="top">Treatment</td><td align="left" valign="top">AI chatbots (Woebot and Wysa), predictive models, VR<sup><xref ref-type="table-fn" rid="table5fn4">d</xref></sup> therapy</td><td align="left" valign="top">24/7 accessibility, personalized interventions (eg, 30% anxiety reduction)</td><td align="left" valign="top">Overreliance on AI, lack of human empathy, limited crisis handling</td><td align="left" valign="top">Algorithmic paternalism, erosion of therapeutic alliance</td></tr><tr><td align="left" valign="top">Patient empowerment</td><td align="left" valign="top">Mood-tracking applications (MindDoc), AI-driven psychoeducation, peer-support 
platforms</td><td align="left" valign="top">Enhanced self-management, reduced stigma, real-time feedback</td><td align="left" valign="top">Digital literacy gaps, algorithmic overreach</td><td align="left" valign="top">Exploitation of vulnerable users (eg, adolescents)</td></tr><tr><td align="left" valign="top">Privacy and security</td><td align="left" valign="top">Federated learning, differential privacy, GDPR<sup><xref ref-type="table-fn" rid="table5fn5">e</xref></sup> or HIPAA<sup><xref ref-type="table-fn" rid="table5fn6">f</xref></sup> compliance</td><td align="left" valign="top">Reduced reidentification risks, decentralized data training</td><td align="left" valign="top">45% applications lack encryption, data sold to third parties (eg, BetterHelp scandal)</td><td align="left" valign="top">Inequitable access, marginalized group exclusion</td></tr><tr><td align="left" valign="top">Accountability</td><td align="left" valign="top">Explainable AI (LIME<sup><xref ref-type="table-fn" rid="table5fn7">g</xref></sup> and SHAP<sup><xref ref-type="table-fn" rid="table5fn8">h</xref></sup>), bias audits (IBM AI Fairness 360)</td><td align="left" valign="top">Improved trust, clinician-AI collaboration (eg, 40% error reduction)</td><td align="left" valign="top">Black-box models, inconsistent regulatory compliance</td><td align="left" valign="top">Misdiagnosis of marginalized groups (eg, racial bias in NLP)</td></tr></tbody></table><table-wrap-foot><fn id="table5fn1"><p><sup>a</sup>NLP: natural language processing.</p></fn><fn id="table5fn2"><p><sup>b</sup>fMRI: functional magnetic resonance imaging. </p></fn><fn id="table5fn3"><p><sup>c</sup>EEG: electroencephalogram. </p></fn><fn id="table5fn4"><p><sup>d</sup>VR: virtual reality. </p></fn><fn id="table5fn5"><p><sup>e</sup>GDPR: General Data Protection Regulation. 
</p></fn><fn id="table5fn6"><p><sup>f</sup>HIPAA: Health Insurance Portability and Accountability Act.</p></fn><fn id="table5fn7"><p><sup>g</sup>LIME: Local Interpretable Model-Agnostic Explanations.</p></fn><fn id="table5fn8"><p><sup>h</sup>SHAP: Shapley Additive Explanations. </p></fn></table-wrap-foot></table-wrap></sec><sec id="s4-5"><title>Study Limitations</title><p>This review has a few limitations, which should be taken carefully into consideration while interpreting the overall study findings. First, in this study, the use of only English-language papers was considered. While this is a common approach in systematic reviews, this perhaps may have led to the exclusion of relevant studies if the paper was written in other languages. This will bring potential bias in language and will restrict the global representativeness of the evidence base (especially from non-English languages).</p><p>Second, there is a great heterogeneity across the included studies. The studies encompass several variations in study designs such as different uses of AI techniques, applications for mental health conditions, outcome measures, and considered evaluation frameworks. Due to this heterogeneity, it was not possible to carry out a meta-analytical synthesis. This review presents a broad overview; thus, the limited results of pooled statistical analysis may restrict the provision of definitive conclusions on the effective use of AI in mental health in various contexts. Consequently, the findings should be interpreted as a qualitative and thematic synthesis and not as conclusive evidence of efficacy.</p><p>Third, the included studies reported diverse and nonstandardized outcomes; therefore, comparability across studies was a major constraint. 
This heterogeneity in outcome definitions and ways of measurement may have impacted the consistency and generality of the conclusions drawn on issues of diagnostic accuracy, treatment effectiveness, and patient empowerment.</p><p>In addition, this review was not formally registered in a protocol registry (eg, PROSPERO), although established systematic review guidelines were followed fully. The lack of prior registration could lead to a lower degree of transparency in a priori methodological choices and a less comprehensive ability to formally evaluate possible deviations from the planned review process.</p><p>Finally, there are legal, ethical, and regulatory interpretations of AI in mental health that also vary considerably across different regions. As a result, the findings relating to privacy, data protection, and regulatory compliance, particularly those which refer to frameworks such as GDPR and emerging AI regulations, may not be equally applicable in all legal contexts. This jurisdictional variability is a limitation in directly transferring legal and policy-related conclusions.</p><p>Despite these limitations, this review offers a structured and comprehensive synthesis of the available literature and identifies key research gaps, methodological challenges, and regulatory considerations that can inform future empirical research and policy development in the context of AI-enabled mental health care.</p></sec><sec id="s4-6"><title>Review Recommendations</title><p>The study, after reviewing several papers, offers the following recommendations:</p><list list-type="bullet"><list-item><p>AI models should be trained on multilingual and diverse datasets, in a way that would make them culture-independent. This will eliminate prejudice and guarantee accuracy in mental health diagnoses across the world.</p></list-item><list-item><p>Enact strong ethical frameworks that extend into the areas of data privacy and patient-informed consent and algorithm transparency. 
Implement ideas of existing regulatory frameworks such as GDPR and HIPAA for data protection of patients.</p></list-item><list-item><p>Develop participatory frameworks to create region-specific AI models that incorporate local patients and practitioners.</p></list-item><list-item><p>Work on differential privacy (adding statistical noise to datasets) or federated learning (training AI on decentralized datasets without data sharing), adhering to GDPR regulations.</p></list-item><list-item><p>Promote international data governance agreements to expedite international health research while upholding local customs.</p></list-item><list-item><p>Implement XAI techniques like LIME and SHAP in a system that would provide explanations for AI recommendations in a way that is understandable. Such transparency will encourage trust between patients and clinicians with AI recommendations.</p></list-item><list-item><p>Design AI-supported models for additive decision support for clinicians instead of replacing human judgment. Use hybrid models that enhance accuracy in diagnosis and treatment planning while preserving the human touch in health care.</p></list-item><list-item><p>As part of routine checking, regular audits should be carried out using tools like IBM&#x2019;s AI Fairness 360 to identify and rectify biases within AI algorithms.</p></list-item></list><p>Overall, based upon a critical and systematic review, comparison, and coherence of studies, this study develops a review of findings as illustrated in <xref ref-type="fig" rid="figure2">Figure 2</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Major review outcome of AI applications in mental health based upon data and bias for proper applications. 
AI: artificial intelligence; API: application programming interface; GDPR: General Data Protection Regulation.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e84305_fig02.png"/></fig></sec><sec id="s4-7"><title>Conclusions</title><p>The widespread adoption of AI in mental health represents a significant technological evolution. However, its role as a paradigm shift remains a future potential rather than a current clinical reality. In addressing mental health care needs, AI demonstrates potential as a supportive tool for screening, monitoring, and digitally delivered interventions. Nonetheless, current evidence remains heterogeneous and context-dependent, with the strength of conclusions often limited by the varying risk of bias across included studies. Claims regarding cost-effectiveness, cultural sensitivity, and large-scale clinical impact require further empirical validation through robust, longitudinal, and comparative studies that prioritize high-certainty evidence. AI-enabled devices, such as NLP models, wearables equipped with biometric sensors, and neuroimaging procedures, have been found to be useful in the diagnosis of mental health disorders such as depression, anxiety, and schizophrenia. NLP models from social media, for example, have been found to be extremely accurate at the detection of suicide and depression, although the results are highly underrepresented elsewhere in terms of clinical trials and are instead collected retrospectively. Wearables such as Empatica&#x2019;s E4 have been found helpful for real-time physiology monitoring.</p><p>As these technological advancements are significant, the weighting of their clinical impact must be balanced against the prevalence of moderate-to-high risk of bias in the current literature. The use of AI for the treatment of mental health has high ethical, technical, and practical implications. 
Data privacy concerns about algorithmic bias and the need for culturally sensitive devices bring home the difficulty of integration of AI in clinical practice. Use-based dependency upon AI devices has raised the problem of patient agency and algorithmic dependency, particularly if a significant judgment is being left in the hands of chatbots. These concerns should be addressed through adequate ethical guidelines, stratification of evidence according to its real-world applicability, involvement of stakeholders, and constant evaluation of AI technology for the greatest potential of AI in mental health treatment. All these will allow AI as an aid for human expertise and not as a substitute for human expertise, while ensuring patient confidence and safety, and improving the quality and access of mental health services. Additionally, AI has the potential to revolutionize mental health care, though the necessary regulatory frameworks, such as GDPR and HIPAA, must be fully integrated with the development and deployment of these systems. For AI to be effective and ethical in the treatment of mental health, a continued effort of research and development involves the importance of a rigorous analytical framework, regulatory compliance, transparency, and patient autonomy. By addressing these challenges and focusing on high-quality, prospectively validated evidence, AI can be implemented in a manner that ensures data privacy, reduces algorithmic bias, and promotes trust between patients and health care providers.</p></sec><sec id="s4-8"><title>Future Research Directions</title><p>The future research directions are as follows:</p><list list-type="bullet"><list-item><p>Perform longitudinal research in order to look into the long-term effectiveness of AI interventions for mental health. 
Such research will inform us about the effectiveness of AI interventions and what harm can be done through long-term use of AI.</p></list-item><list-item><p>Research on combining various sources such as EHRs, wearables, and patient-reported results in a single overall mental health assessment AI system.</p></list-item><list-item><p>Research into the use of AI for crisis intervention would have to account for the strengths and limitations of AI technology in the context of acute mental health crisis situations. Research would need to be conducted in order to ensure that AI technology possesses the skills to identify and respond in an emergency situation and steer the individuals toward the crisis human support when required.</p></list-item><list-item><p>Evaluate various patient engagement strategies for AI-enabled solutions in patient populations whose digital literacy levels span across a range of domains. An understanding of user behavior will inform the development of less complex, user-friendly AI applications.</p></list-item><list-item><p>Work together with policymakers toward drafting policies that enable proper and ethical use of AI in mental health service provision. Such policies should touch upon matters such as liability, standardization, and reimbursement for AI service provision.</p></list-item><list-item><p>In the future, studies need to cover explicit documentation of GDPR and HIPAA compliance. 
This may include a detailed account of consent mechanisms, data anonymization techniques, and the implementation of privacy by design principles in AI development.</p></list-item></list></sec></sec></body><back><notes><sec><title>Funding</title><p>The authors declared no financial support was received for this work.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb3">CBT</term><def><p>cognitive behavioral therapy</p></def></def-item><def-item><term id="abb4">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb5">EAI</term><def><p>explainable artificial intelligence</p></def></def-item><def-item><term id="abb6">EHR</term><def><p>electronic health record</p></def></def-item><def-item><term id="abb7">FDA</term><def><p>Food and Drug Administration</p></def></def-item><def-item><term id="abb8">GDPR</term><def><p>General Data Protection Regulation</p></def></def-item><def-item><term id="abb9">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item><def-item><term id="abb10">HRV</term><def><p>heart rate variability</p></def></def-item><def-item><term id="abb11">LIME</term><def><p>Local Interpretable Model-Agnostic Explanations</p></def></def-item><def-item><term id="abb12">NLP</term><def><p>natural language processing</p></def></def-item><def-item><term id="abb13">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb14">SaMD</term><def><p>Software as a Medical Device</p></def></def-item><def-item><term id="abb15">SHAP</term><def><p>Shapley Additive Explanations</p></def></def-item><def-item><term id="abb16">WHO</term><def><p>World Health 
Organization</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>World mental health report: transforming mental health for all</article-title><source>World Health Organization</source><year>2022</year><access-date>2026-04-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/i/item/9789240049338">https://www.who.int/publications/i/item/9789240049338</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Patel</surname><given-names>V</given-names> </name><name name-style="western"><surname>Saxena</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lund</surname><given-names>C</given-names> </name><etal/></person-group><article-title>The Lancet Commission on global mental health and sustainable development</article-title><source>The Lancet</source><year>2018</year><month>10</month><volume>392</volume><issue>10157</issue><fpage>1553</fpage><lpage>1598</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(18)31612-X</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Afify</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Tannar</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zakria</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Hashish</surname><given-names>A</given-names> </name><name name-style="western"><surname>El-Wafa</surname><given-names>TA</given-names> </name></person-group><article-title>Unveiling the right to health in Egypt: exploring the transformations and challenges in Egyptian constitutional law and 
policy</article-title><source>Acad J Interdiscip Stud</source><year>2023</year><volume>12</volume><issue>6</issue><fpage>95</fpage><pub-id pub-id-type="doi">10.36941/ajis-2023-0156</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fayed</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zakaria</surname><given-names>A</given-names> </name><name name-style="western"><surname>Abouahmed</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Innovations of artificial intelligence in light of the applicable copyright law: realistic solutions and future prospects. A comparative study of UAE, Egyptian, and French laws</article-title><source>AJEE</source><year>2025</year><month>02</month><day>14</day><volume>8</volume><issue>1</issue><fpage>241</fpage><lpage>263</lpage><pub-id pub-id-type="doi">10.33327/AJEE-18-8.1-a000116</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><source>Deep Medicine: How Artificial Intelligence Can Make Healthcare Human Again</source><year>2019</year><publisher-name>Basic Books</publisher-name></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gkotsis</surname><given-names>G</given-names> </name><name name-style="western"><surname>Oellrich</surname><given-names>A</given-names> </name><name name-style="western"><surname>Velupillai</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Characterisation of mental health conditions in social media using Informed Deep Learning</article-title><source>Sci 
Rep</source><year>2017</year><month>03</month><day>22</day><volume>7</volume><issue>1</issue><fpage>45141</fpage><pub-id pub-id-type="doi">10.1038/srep45141</pub-id><pub-id pub-id-type="medline">28327593</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jacobson</surname><given-names>NC</given-names> </name><name name-style="western"><surname>Bentley</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Walton</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Ethical dilemmas posed by mobile health and machine learning in psychiatry research</article-title><source>Bull World Health Organ</source><year>2020</year><month>04</month><day>1</day><volume>98</volume><issue>4</issue><fpage>270</fpage><lpage>276</lpage><pub-id pub-id-type="doi">10.2471/BLT.19.237107</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Graham</surname><given-names>S</given-names> </name><name name-style="western"><surname>Depp</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>EE</given-names> </name><etal/></person-group><article-title>Artificial intelligence for mental health and mental illnesses: an overview</article-title><source>Curr Psychiatry Rep</source><year>2019</year><month>11</month><day>7</day><volume>21</volume><issue>11</issue><fpage>1</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.1007/s11920-019-1094-0</pub-id><pub-id pub-id-type="medline">31701320</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Darcy</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Daniels</surname><given-names>J</given-names> </name><name name-style="western"><surname>Salinger</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wicks</surname><given-names>P</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>A</given-names> </name></person-group><article-title>Evidence of human-level bonds established with a digital conversational agent: cross-sectional, retrospective observational study</article-title><source>JMIR Form Res</source><year>2021</year><month>05</month><day>11</day><volume>5</volume><issue>5</issue><fpage>e27868</fpage><pub-id pub-id-type="doi">10.2196/27868</pub-id><pub-id pub-id-type="medline">33973854</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chekroud</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Zotti</surname><given-names>RJ</given-names> </name><name name-style="western"><surname>Shehzad</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Cross-trial prediction of treatment outcome in depression: a machine learning approach</article-title><source>Lancet Psychiatry</source><year>2016</year><month>03</month><volume>3</volume><issue>3</issue><fpage>243</fpage><lpage>250</lpage><pub-id pub-id-type="doi">10.1016/S2215-0366(15)00471-X</pub-id><pub-id pub-id-type="medline">26803397</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bucci</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bell</surname><given-names>IH</given-names> </name><etal/></person-group><article-title>The growing field of 
digital psychiatry: current evidence and the future of apps, social media, chatbots, and virtual reality</article-title><source>World Psychiatry</source><year>2021</year><month>10</month><volume>20</volume><issue>3</issue><fpage>318</fpage><lpage>335</lpage><pub-id pub-id-type="doi">10.1002/wps.20883</pub-id><pub-id pub-id-type="medline">34505369</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eldakak</surname><given-names>A</given-names> </name><name name-style="western"><surname>Alremeithi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dahiyat</surname><given-names>E</given-names> </name><name name-style="western"><surname>El-Gheriani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mohamed</surname><given-names>H</given-names> </name><name name-style="western"><surname>Abdulrahim Abdulla</surname><given-names>MI</given-names> </name></person-group><article-title>Civil liability for the actions of autonomous AI in healthcare: an invitation to further contemplation</article-title><source>Humanit Soc Sci Commun</source><year>2024</year><volume>11</volume><issue>1</issue><fpage>305</fpage><pub-id pub-id-type="doi">10.1057/s41599-024-02806-y</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>EE</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>De Choudhury</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Artificial intelligence for mental health care: clinical applications, barriers, facilitators, and artificial wisdom</article-title><source>Biol Psychiatry Cogn Neurosci 
Neuroimaging</source><year>2021</year><month>09</month><volume>6</volume><issue>9</issue><fpage>856</fpage><lpage>864</lpage><pub-id pub-id-type="doi">10.1016/j.bpsc.2021.02.001</pub-id><pub-id pub-id-type="medline">33571718</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jobin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ienca</surname><given-names>M</given-names> </name><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name></person-group><article-title>The global landscape of AI ethics guidelines</article-title><source>Nat Mach Intell</source><year>2019</year><volume>1</volume><issue>9</issue><fpage>389</fpage><lpage>399</lpage><pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Page</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><etal/></person-group><article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title><source>BMJ</source><year>2021</year><month>03</month><day>29</day><volume>372</volume><fpage>n71</fpage><pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id><pub-id pub-id-type="medline">33782057</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Grant</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name></person-group><article-title>A typology of reviews: an 
analysis of 14 review types and associated methodologies</article-title><source>Health Info Libraries J</source><year>2009</year><month>06</month><volume>26</volume><issue>2</issue><fpage>91</fpage><lpage>108</lpage><pub-id pub-id-type="doi">10.1111/j.1471-1842.2009.00848.x</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cooke</surname><given-names>A</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>D</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name></person-group><article-title>Beyond PICO: the SPIDER tool for qualitative evidence synthesis</article-title><source>Qual Health Res</source><year>2012</year><month>10</month><volume>22</volume><issue>10</issue><fpage>1435</fpage><lpage>1443</lpage><pub-id pub-id-type="doi">10.1177/1049732312452938</pub-id><pub-id pub-id-type="medline">22829486</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Liberati</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tetzlaff</surname><given-names>J</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><collab>PRISMA Group</collab></person-group><article-title>Preferred Reporting Items for Systematic Reviews and Meta-Analyses: the PRISMA statement</article-title><source>PLoS Med</source><year>2009</year><month>07</month><day>21</day><volume>6</volume><issue>7</issue><fpage>e1000097</fpage><pub-id pub-id-type="doi">10.1371/journal.pmed.1000097</pub-id><pub-id pub-id-type="medline">19621072</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morrison</surname><given-names>A</given-names> </name><name name-style="western"><surname>Polisena</surname><given-names>J</given-names> </name><name name-style="western"><surname>Husereau</surname><given-names>D</given-names> </name><etal/></person-group><article-title>The effect of English-language restriction on systematic review-based meta-analyses: a systematic review of empirical studies</article-title><source>Int J Technol Assess Health Care</source><year>2012</year><month>04</month><volume>28</volume><issue>2</issue><fpage>138</fpage><lpage>144</lpage><pub-id pub-id-type="doi">10.1017/S0266462312000086</pub-id><pub-id pub-id-type="medline">22559755</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>De Choudhury</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gamon</surname><given-names>M</given-names> </name><name name-style="western"><surname>Counts</surname><given-names>S</given-names> </name><name name-style="western"><surname>Horvitz</surname><given-names>E</given-names> </name></person-group><article-title>Predicting depression via social media</article-title><source>ICWSM</source><year>2013</year><volume>7</volume><issue>1</issue><fpage>128</fpage><lpage>137</lpage><pub-id pub-id-type="doi">10.1609/icwsm.v7i1.14432</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Coppersmith</surname><given-names>G</given-names> </name><name name-style="western"><surname>Leary</surname><given-names>R</given-names> </name><name name-style="western"><surname>Crutchley</surname><given-names>P</given-names> </name><name name-style="western"><surname>Fine</surname><given-names>A</given-names> 
</name></person-group><article-title>Natural language processing of social media as screening for suicide risk</article-title><source>Biomed Inform Insights</source><year>2018</year><volume>10</volume><fpage>1178222618792860</fpage><pub-id pub-id-type="doi">10.1177/1178222618792860</pub-id><pub-id pub-id-type="medline">30158822</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Harrigian</surname><given-names>K</given-names> </name><name name-style="western"><surname>Aguirre</surname><given-names>C</given-names> </name><name name-style="western"><surname>Dredze</surname><given-names>M</given-names> </name></person-group><article-title>On the state of social media data for mental health research</article-title><conf-name>Proceedings of the Seventh Workshop on Computational Linguistics and Clinical Psychology</conf-name><conf-date>Jun 11, 2021</conf-date><pub-id pub-id-type="doi">10.18653/v1/2021.clpsych-1.2</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Ye</surname><given-names>H</given-names> </name><name name-style="western"><surname>Weissweiler</surname><given-names>L</given-names> </name><name name-style="western"><surname>Pei</surname><given-names>R</given-names> </name><name name-style="western"><surname>Schuetze</surname><given-names>H</given-names> </name></person-group><article-title>Crosslingual transfer learning for low-resource languages based on multilingual colexification graphs</article-title><conf-name>Findings of the Association for Computational Linguistics</conf-name><conf-date>Dec 6-10, 2023</conf-date><pub-id pub-id-type="doi">10.18653/v1/2023.findings-emnlp.562</pub-id></nlm-citation></ref><ref 
id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guntuku</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Yaden</surname><given-names>DB</given-names> </name><name name-style="western"><surname>Kern</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Ungar</surname><given-names>LH</given-names> </name><name name-style="western"><surname>Eichstaedt</surname><given-names>JC</given-names> </name></person-group><article-title>Detecting depression and mental illness on social media: an integrative review</article-title><source>Curr Opin Behav Sci</source><year>2017</year><month>12</month><volume>18</volume><fpage>43</fpage><lpage>49</lpage><pub-id pub-id-type="doi">10.1016/j.cobeha.2017.07.005</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>D&#x2019;Alfonso</surname><given-names>S</given-names> </name><name name-style="western"><surname>Coghlan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schmidt</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mangelsdorf</surname><given-names>S</given-names> </name></person-group><article-title>Ethical dimensions of digital phenotyping within the context of mental healthcare</article-title><source>J Technol Behav Sci</source><year>2025</year><volume>10</volume><issue>1</issue><fpage>132</fpage><lpage>147</lpage><pub-id pub-id-type="doi">10.1007/s41347-024-00423-9</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zeng</surname><given-names>LL</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name 
name-style="western"><surname>Hu</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Multi-site diagnostic classification of schizophrenia using discriminant deep learning with functional connectivity MRI</article-title><source>EBioMedicine</source><year>2018</year><month>04</month><volume>30</volume><fpage>74</fpage><lpage>85</lpage><pub-id pub-id-type="doi">10.1016/j.ebiom.2018.03.017</pub-id><pub-id pub-id-type="medline">29622496</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bender</surname><given-names>AD</given-names> </name><name name-style="western"><surname>Filmer</surname><given-names>HL</given-names> </name><name name-style="western"><surname>Dux</surname><given-names>PE</given-names> </name></person-group><article-title>Transcranial direct current stimulation of superior medial frontal cortex disrupts response selection during proactive response inhibition</article-title><source>Neuroimage</source><year>2017</year><month>09</month><volume>158</volume><fpage>455</fpage><lpage>465</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2016.10.035</pub-id><pub-id pub-id-type="medline">27789261</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tseng</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Chou</surname><given-names>FH</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>CH</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>YP</given-names> </name></person-group><article-title>Effects of mindfulness-based cognitive therapy on major depressive disorder with multiple episodes: a systematic review and meta-analysis</article-title><source>Int J Environ Res Public 
Health</source><year>2023</year><month>01</month><day>14</day><volume>20</volume><issue>2</issue><fpage>1555</fpage><pub-id pub-id-type="doi">10.3390/ijerph20021555</pub-id><pub-id pub-id-type="medline">36674310</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ritvo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ahmad</surname><given-names>F</given-names> </name><name name-style="western"><surname>El Morr</surname><given-names>C</given-names> </name><name name-style="western"><surname>Pirbaglou</surname><given-names>M</given-names> </name><name name-style="western"><surname>Moineddin</surname><given-names>R</given-names> </name><collab>MVC Team</collab></person-group><article-title>A mindfulness-based intervention for student depression, anxiety, and stress: randomized controlled trial</article-title><source>JMIR Ment Health</source><year>2021</year><volume>8</volume><issue>1</issue><fpage>e23491</fpage><pub-id pub-id-type="doi">10.2196/23491</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Laranjo</surname><given-names>L</given-names> </name><name name-style="western"><surname>Dunn</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Tong</surname><given-names>HL</given-names> </name><etal/></person-group><article-title>Conversational agents in healthcare: a systematic review</article-title><source>J Am Med Inform Assoc</source><year>2018</year><month>09</month><day>1</day><volume>25</volume><issue>9</issue><fpage>1248</fpage><lpage>1258</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocy072</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Nwosu</surname><given-names>A</given-names> </name><name name-style="western"><surname>Boardman</surname><given-names>S</given-names> </name><name name-style="western"><surname>Husain</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Doraiswamy</surname><given-names>PM</given-names> </name></person-group><article-title>Digital therapeutics for mental health: is attrition the Achilles heel?</article-title><source>Front Psychiatry</source><year>2022</year><volume>13</volume><fpage>900615</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2022.900615</pub-id><pub-id pub-id-type="medline">35982936</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Naslund</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Aschbrenner</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Araya</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Digital technology for treating and preventing mental disorders in low-income and middle-income countries: a narrative review of the literature</article-title><source>Lancet Psychiatry</source><year>2017</year><month>06</month><volume>4</volume><issue>6</issue><fpage>486</fpage><lpage>500</lpage><pub-id pub-id-type="doi">10.1016/S2215-0366(17)30096-2</pub-id><pub-id pub-id-type="medline">28433615</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Benjamens</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dhunnoo</surname><given-names>P</given-names> </name><name name-style="western"><surname>Mesk&#x00F3;</surname><given-names>B</given-names> </name></person-group><article-title>The state of artificial intelligence-based 
FDA-approved medical devices and algorithms: an online database</article-title><source>npj Digit Med</source><year>2020</year><volume>3</volume><issue>1</issue><fpage>118</fpage><pub-id pub-id-type="doi">10.1038/s41746-020-00324-0</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brancati</surname><given-names>GE</given-names> </name><name name-style="western"><surname>Nunes</surname><given-names>A</given-names> </name><name name-style="western"><surname>Scott</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Differential characteristics of bipolar I and II disorders: a retrospective, cross-sectional evaluation of clinical features, illness course, and response to treatment</article-title><source>Int J Bipolar Disord</source><year>2023</year><month>07</month><day>14</day><volume>11</volume><issue>1</issue><fpage>25</fpage><pub-id pub-id-type="doi">10.1186/s40345-023-00304-9</pub-id><pub-id pub-id-type="medline">37452256</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arya</surname><given-names>V</given-names> </name><name name-style="western"><surname>Bellamy</surname><given-names>RKE</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>PY</given-names> </name><etal/></person-group><article-title>AI Explainability 360: an extensible toolkit for understanding data and machine learning models</article-title><source>J Mach Learn Res</source><year>2020</year><access-date>2026-04-16</access-date><volume>21</volume><fpage>1</fpage><lpage>6</lpage><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.jmlr.org/papers/volume21/19-1035/19-1035.pdf">https://www.jmlr.org/papers/volume21/19-1035/19-1035.pdf</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Powers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vogeli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mullainathan</surname><given-names>S</given-names> </name></person-group><article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title><source>Science</source><year>2019</year><month>10</month><day>25</day><volume>366</volume><issue>6464</issue><fpage>447</fpage><lpage>453</lpage><pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id><pub-id pub-id-type="medline">31649194</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Ribeiro</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Guestrin</surname><given-names>C</given-names> </name></person-group><article-title>&#x201C;Why should I trust you?&#x201D;: Explaining the predictions of any classifier</article-title><year>2016</year><conf-name>Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations</conf-name><pub-id pub-id-type="doi">10.18653/v1/N16-3020</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sheller</surname><given-names>MJ</given-names> </name><name 
name-style="western"><surname>Edwards</surname><given-names>B</given-names> </name><name name-style="western"><surname>Reina</surname><given-names>GA</given-names> </name><etal/></person-group><article-title>Federated learning in medicine: facilitating multi-institutional collaborations without sharing patient data</article-title><source>Sci Rep</source><year>2020</year><month>07</month><day>28</day><volume>10</volume><issue>1</issue><fpage>12598</fpage><pub-id pub-id-type="doi">10.1038/s41598-020-69250-1</pub-id><pub-id pub-id-type="medline">32724046</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Kandeel</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Salameh</surname><given-names>HB</given-names> </name><name name-style="western"><surname>Elrefae</surname><given-names>GA</given-names> </name><name name-style="western"><surname>Qasim</surname><given-names>A</given-names> </name></person-group><article-title>Regulations for UAV operation in social applications and services: a general perspective</article-title><conf-name>2022 Ninth International Conference on Social Networks Analysis, Management and Security (SNAMS)</conf-name><conf-date>Nov 29 to Dec 1, 2022</conf-date><pub-id pub-id-type="doi">10.1109/SNAMS58071.2022.10062752</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rocher</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hendrickx</surname><given-names>JM</given-names> </name><name name-style="western"><surname>de Montjoye</surname><given-names>YA</given-names> </name></person-group><article-title>Estimating the success of re-identifications in incomplete datasets using generative models</article-title><source>Nat 
Commun</source><year>2019</year><month>07</month><day>23</day><volume>10</volume><issue>1</issue><fpage>3069</fpage><pub-id pub-id-type="doi">10.1038/s41467-019-10933-3</pub-id><pub-id pub-id-type="medline">31337762</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dwork</surname><given-names>C</given-names> </name><name name-style="western"><surname>Roth</surname><given-names>A</given-names> </name></person-group><article-title>The algorithmic foundations of differential privacy</article-title><source>Found Trends Theor Comput Sci</source><year>2014</year><month>08</month><day>11</day><volume>9</volume><issue>3-4</issue><fpage>211</fpage><lpage>487</lpage><pub-id pub-id-type="doi">10.1561/0400000042</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="web"><article-title>Ethics and governance of artificial intelligence for health</article-title><source>World Health Organization</source><year>2021</year><access-date>2026-04-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/i/item/9789240029200">https://www.who.int/publications/i/item/9789240029200</ext-link></comment></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dzangare</surname><given-names>G</given-names> </name><name name-style="western"><surname>Gulu</surname><given-names>TA</given-names> </name></person-group><article-title>Adopting artificial intelligence for health information literacy: a literature review</article-title><source>Inform Dev</source><year>2025</year><month>09</month><volume>41</volume><issue>3</issue><fpage>576</fpage><lpage>591</lpage><pub-id pub-id-type="doi">10.1177/02666669251314839</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation 
citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Shalaby</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Abdelaziz</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Kandeel</surname><given-names>ME</given-names> </name></person-group><article-title>Using artificial intelligence to resolve disputes through online arbitration</article-title><conf-name>2022 Ninth International Conference on Social Networks Analysis, Management and Security (SNAMS)</conf-name><conf-date>Nov 29 to Dec 1, 2022</conf-date><pub-id pub-id-type="doi">10.1109/SNAMS58071.2022.10062524</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gr&#x00FC;nerbl</surname><given-names>A</given-names> </name><name name-style="western"><surname>Muaremi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Osmani</surname><given-names>V</given-names> </name><etal/></person-group><article-title>Smartphone-based recognition of states and state changes in bipolar disorder patients</article-title><source>IEEE J Biomed Health Inform</source><year>2015</year><month>01</month><volume>19</volume><issue>1</issue><fpage>140</fpage><lpage>148</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2014.2343154</pub-id><pub-id pub-id-type="medline">25073181</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mohamed</surname><given-names>H</given-names> </name></person-group><article-title>The iRobo-Surgeon conundrum: comparative reflections on the legal treatment of intraoperative errors committed by autonomous surgical robots</article-title><source>Law Innov 
Technol</source><year>2024</year><month>01</month><day>2</day><volume>16</volume><issue>1</issue><fpage>194</fpage><lpage>217</lpage><pub-id pub-id-type="doi">10.1080/17579961.2024.2313802</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arrieta</surname><given-names>AB</given-names> </name><name name-style="western"><surname>D&#x00ED;az-Rodr&#x00ED;guez</surname><given-names>N</given-names> </name><name name-style="western"><surname>Del Ser</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Explainable artificial intelligence (XAI): concepts, taxonomies, opportunities and challenges toward responsible AI</article-title><source>Inform Fusion</source><year>2020</year><month>06</month><volume>58</volume><fpage>82</fpage><lpage>115</lpage><pub-id pub-id-type="doi">10.1016/j.inffus.2019.12.012</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Bellamy</surname><given-names>RKE</given-names> </name><name name-style="western"><surname>Dey</surname><given-names>K</given-names> </name><name name-style="western"><surname>Hind</surname><given-names>M</given-names> </name><etal/></person-group><article-title>AI Fairness 360: an extensible toolkit for detecting, understanding, and mitigating unwanted algorithmic bias</article-title><comment>Preprint posted online on  Oct 3, 2018</comment><pub-id pub-id-type="doi">10.48550/arXiv.1810.01943</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Martinez-Martin</surname><given-names>N</given-names> </name><name name-style="western"><surname>Kreitmair</surname><given-names>K</given-names> </name></person-group><article-title>Ethical 
issues for direct-to-consumer digital psychotherapy apps: addressing accountability, data protection, and consent</article-title><source>JMIR Ment Health</source><year>2018</year><month>04</month><day>23</day><volume>5</volume><issue>2</issue><fpage>e32</fpage><pub-id pub-id-type="doi">10.2196/mental.9423</pub-id><pub-id pub-id-type="medline">29685865</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Kandeel</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Hamza</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Elrefae</surname><given-names>G</given-names> </name></person-group><article-title>AI governance: a general perspective</article-title><conf-name>2024 Global Congress on Emerging Technologies (GCET)</conf-name><conf-date>Dec 9-11, 2024</conf-date><pub-id pub-id-type="doi">10.1109/GCET64327.2024.10934585</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Checklist 1</label><p>PRISMA checklist.</p><media xlink:href="ai_v5i1e84305_app1.pdf" xlink:title="PDF File, 67 KB"/></supplementary-material></app-group></back></article>