<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v4i1e75866</article-id><article-id pub-id-type="doi">10.2196/75866</article-id><article-categories><subj-group subj-group-type="heading"><subject>Viewpoint</subject></subj-group></article-categories><title-group><article-title>AI-Supported Shared Decision-Making (AI-SDM): Conceptual Framework</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>As'ad</surname><given-names>Mohammed</given-names></name><degrees>MSc (Healthcare), MBA, MRCEM, MRCS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Faran</surname><given-names>Nawarh</given-names></name><degrees>MBA, BSc Rc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Joharji</surname><given-names>Hala</given-names></name><degrees>Pharma.D, MHA, BCPS, BCMTM</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Corporate Quality &#x0026; Patient Safety, Dr Sulaiman Al Habib Medical Group</institution><addr-line>Olaya 
Street</addr-line><addr-line>Riyadh</addr-line><country>Saudi Arabia</country></aff><aff id="aff2"><institution>Dr Sulaiman Al Habib Medical Group</institution><addr-line>Riyadh</addr-line><country>Saudi Arabia</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Dankar</surname><given-names>Fida</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Said-Criado</surname><given-names>Ismael</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Meng</surname><given-names>Meiqi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Mohammed As'ad, MSc (Healthcare), MBA, MRCEM, MRCS, Corporate Quality &#x0026; Patient Safety, Dr Sulaiman Al Habib Medical Group, Olaya Street, Riyadh, 12214, Saudi Arabia, 966 920066666; <email>drmohasad@gmail.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>7</day><month>8</month><year>2025</year></pub-date><volume>4</volume><elocation-id>e75866</elocation-id><history><date date-type="received"><day>12</day><month>04</month><year>2025</year></date><date date-type="rev-recd"><day>24</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>08</day><month>07</month><year>2025</year></date></history><copyright-statement>&#x00A9; Mohammed As'ad, Nawarh Faran, Hala Joharji. Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 7.8.2025. 
</copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2025/1/e75866"/><abstract><p>Shared decision-making is central to patient-centered care but is often hampered by artificial intelligence (AI) systems that focus on technical transparency rather than delivering context-rich, clinically meaningful reasoning. Although AI explainability methods elucidate how decisions are made, they fall short of addressing the &#x201C;why&#x201D; that supports effective patient-clinician dialogue. To bridge this gap, we introduce artificial intelligence&#x2013;supported shared decision-making (AI-SDM), a conceptual framework designed to integrate AI-based reasoning into shared decision-making to enhance care quality while preserving patient autonomy. AI-SDM is a structured, multimodel framework that synthesizes predictive modeling, evidence-based recommendations, and generative AI techniques to produce adaptive, context-sensitive explanations. The framework distinguishes conventional AI explainability from AI reasoning&#x2014;prioritizing the generation of tailored, narrative justifications that inform shared decisions. 
A hypothetical clinical scenario in stroke management is used to illustrate how AI-SDM facilitates an iterative, triadic deliberation process between health care providers, patients, and AI outputs. This integration is intended to transform raw algorithmic data into actionable insights that directly support the decision-making process without supplanting human judgment.</p></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>shared decision-making</kwd><kwd>AI reasoning</kwd><kwd>clinical decision support</kwd><kwd>generative AI</kwd><kwd>patient-centered care</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Shared decision-making (SDM) is characterized by collaboration between health care professionals (HCPs) and patients to align with patient values [<xref ref-type="bibr" rid="ref1">1</xref>]. It has become central to patient-centered care, marking a shift from historical paternalism [<xref ref-type="bibr" rid="ref2">2</xref>]. Concurrently, artificial intelligence (AI) is increasingly integrated into health care, offering powerful tools for diagnosis, prognostication, and treatment planning [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>], thereby augmenting clinical capabilities through the analysis of vast datasets [<xref ref-type="bibr" rid="ref5">5</xref>]. Despite the potential synergies, effectively integrating AI insights into the established SDM process remains a critical challenge.</p><p>A key barrier lies in the distinction between artificial intelligence explainability (XAI) and AI reasoning. While XAI focuses on rendering algorithmic processes transparent, primarily for technical validation [<xref ref-type="bibr" rid="ref6">6</xref>], it often fails to produce justifications that are clinically meaningful and readily communicable within the patient-HCP dialogue. 
This technical transparency, though important for trust [<xref ref-type="bibr" rid="ref6">6</xref>], does not equate to the human-centered, contextual reasoning required for SDM. Consequently, there is a disconnect: AI may be explainable technically but not communicable clinically, and traditional SDM frameworks lack mechanisms to incorporate AI-generated reasoning [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>This paper introduces artificial intelligence-supported shared decision-making (AI-SDM), a conceptual framework designed to bridge this gap. AI-SDM leverages predictive modeling, evidence synthesis, and generative AI to embed AI reasoning, contextual, human-interpretable justifications, directly into the SDM workflow. The framework facilitates collaborative deliberation among HCPs, patients, and AI systems, ensuring AI insights are transparent, contestable, and tailored to individual patient circumstances. By positioning AI as a reasoning facilitator rather than a decision maker, AI-SDM aims to enhance decision quality and evidence-based practice while preserving patient autonomy. Herein, we differentiate AI reasoning from explainability, detail the AI-SDM model and its multimodal AI integration, illustrate its potential application in a clinical scenario, and discuss implementation challenges and future directions.</p></sec><sec id="s2"><title>AI Reasoning Versus Explainability</title><p>Integrating AI effectively into SDM demands clarity on key distinctions between AI transparency, XAI, and AI reasoning. AI transparency provides fundamental visibility into the AI&#x2019;s process and data, aiming for openness and enabling auditability. This primarily serves regulators, developers, and users needing to understand &#x201C;What did the system do?&#x201D;, often via access to code or data flow [<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>Building on this, XAI focuses specifically on illuminating the internal algorithmic logic. 
Its goal is primarily technical&#x2014;model validation, debugging, and fairness checks&#x2014;targeted at developers, data scientists, and auditors [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. XAI answers &#x201C;How did the system produce the output?&#x201D; using techniques like feature importance scores (Shapley Additive Explanations), heatmaps, or local models (Local Interpretable Model-Agnostic Explanations) [<xref ref-type="bibr" rid="ref8">8</xref>].
<xref ref-type="table" rid="table1">Table 1</xref> summarizes these core distinctions.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Distinctions among artificial intelligence (AI) transparency, artificial intelligence explainability (XAI), and AI reasoning.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Feature</td><td align="left" valign="bottom">AI transparency</td><td align="left" valign="bottom">XAI</td><td align="left" valign="bottom">AI reasoning (for AI-SDM)<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td></tr></thead><tbody><tr><td align="left" valign="top">Focus</td><td align="left" valign="top">Visibility of process/data</td><td align="left" valign="top">Internal algorithmic logic</td><td align="left" valign="top">Clinical relevance and justification</td></tr><tr><td align="left" valign="top">Goal</td><td align="left" valign="top">Openness and auditability</td><td align="left" valign="top">Model validation, debugging, and fairness check</td><td align="left" valign="top">Facilitate understanding and deliberation</td></tr><tr><td align="left" valign="top">Audience</td><td align="left" valign="top">Regulators, developers, and users</td><td align="left" valign="top">Developers, data scientists, and auditors</td><td align="left" valign="top">HCP<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> and patients</td></tr><tr><td align="left" valign="top">Answers</td><td align="left" valign="top">&#x201C;What did the system do?&#x201D;</td><td align="left" valign="top">&#x201C;How did the system produce the output?&#x201D;</td><td align="left" valign="top">&#x201C;Why is this output relevant for the patient?&#x201D;</td></tr><tr><td align="left" valign="top">Example output</td><td align="left" valign="top">Access to code/data flow</td><td align="left" valign="top">Feature importance (SHAP)<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup>, 
heatmaps, LIME<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">Contextual narrative and risk/benefit summary</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AI-SDM: artificial intelligence&#x2013;supported shared decision-making.</p></fn><fn id="table1fn2"><p><sup>b</sup>HCP: health care professional.</p></fn><fn id="table1fn3"><p><sup>c</sup>SHAP: Shapley Additive Explanations.</p></fn><fn id="table1fn4"><p><sup>d</sup>LIME: Local Interpretable Model-Agnostic Explanations.</p></fn></table-wrap-foot></table-wrap><p>The capacity for AI reasoning has evolved significantly. Historically, clinical decision-making relied on human cognition, later supplemented by early rule-based or probabilistic clinical decision support systems offering limited reasoning capabilities [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. The integration of machine learning and, more recently, advanced large language models (LLMs) has transformed AI&#x2019;s potential [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref15">15</xref>]. Modern AI can now perform multistep, domain-specific inference [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>], moving beyond mere pattern recognition to simulate aspects of human deductive, inductive, abductive, and case-based reasoning [<xref ref-type="bibr" rid="ref18">18</xref>]. 
AI systems draw on diverse reasoning approaches&#x2014;from symbolic logic (transparent but less flexible) and statistical methods (probabilistic and less intuitive causality) to opaque neural networks and hybrid neuro-symbolic or knowledge-infused systems aiming for interpretability and semantic alignment [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref21">21</xref>].</p><p>This advanced AI reasoning is crucial for SDM, aligning with principles of evidence-based practice and precision medicine [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. SDM requires more than accurate predictions; it demands justifications grounded in clinical workflows, patient history, and anticipated outcomes, enabling deliberation on values and trade-offs [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref23">23</xref>]. AI reasoning provides this by synthesizing large-scale, heterogeneous data (genomic, clinical, real-world evidence) [<xref ref-type="bibr" rid="ref24">24</xref>] and articulating not just what is predicted, but why it applies to the individual, considering complex risk-benefit profiles and personal priorities [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. AI reasoning thus acts as a communicative, human-centered layer built upon, but distinct from XAI&#x2019;s technical foundations [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. 
This distinction reshapes trust: while XAI builds trust via technical validation, AI reasoning fosters interpersonal trust through semantic clarity, contextual relevance, and value alignment within the clinical encounter&#x2014;prerequisites for meaningful SDM.</p></sec><sec id="s3"><title>The Role of AI Reasoning in SDM</title><sec id="s3-1"><title>SDM as a Process</title><p>SDM is a structured yet flexible process in which HCPs and patients collaboratively determine the best course of action, integrating medical evidence with the patient&#x2019;s values and preferences. Recognizing that many clinical decisions involve multiple valid options, SDM ensures that the chosen path reflects what matters most to an informed patient. The process unfolds in distinct stages [<xref ref-type="bibr" rid="ref1">1</xref>]. Information exchange serves as the foundation, with HCP presenting viable options, detailing their benefits, risks, and uncertainties. Traditionally, this stage is often supported by static Patient Decision Aids, such as those developed guided by frameworks like the Ottawa Decision Support Framework [<xref ref-type="bibr" rid="ref28">28</xref>]. The aim is to prepare patients by increasing knowledge and helping clarify values. Deliberation follows, allowing the patient and HCP to explore these options in the context of the patient&#x2019;s goals, concerns, and circumstances. This phase encourages active dialogue, where patients seek clarification and HCPs ensure comprehension. Decision-making emerges from this discussion, as both parties reach a consensus that aligns clinical expertise with patient priorities. Finally, implementation translates the decision into action, requiring commitment from both patient and HCP. Adherence depends on confidence in the decision, reinforced by clear communication, trust, and continued support through follow-up. 
While SDM enhances patient engagement and clinical outcomes, its integration into routine practice remains inconsistent. Effective implementation demands a cultural shift in clinical workflows, supported by training, institutional commitment, and tools that facilitate meaningful participation rather than tokenistic involvement.</p></sec><sec id="s3-2"><title>Challenges in SDM Addressed by AI and Generative AI</title><p>Despite the established benefits of SDM, practical implementation faces substantial barriers that AI, particularly generative AI, can effectively address. The contemporary medical environment presents HCPs and patients with increasingly complex information that can impede effective communication. While traditional AI models provide structured risk stratification and evidence-based recommendations, generative AI complements these by transforming clinical data into adaptive, natural language explanations that facilitate interactive engagement.</p><p>A significant barrier is varying health literacy, with many adults struggling to comprehend complex medical information. Generative AI addresses this by converting dense medical reasoning into accessible narratives, calibrated to individual literacy levels through techniques like reading-level adaptation, while preserving clinical accuracy. This supports more meaningful engagement across diverse patient populations without sacrificing informational integrity. Furthermore, AI reasoning can synthesize information related to multiple conditions or comorbidities, presenting a holistic view tailored to the patient&#x2019;s overall health status, which is often difficult with standard, single-condition PDAs.</p><p>Time constraints consistently limit comprehensive SDM implementation. Generative AI streamlines this process by autonomously producing structured, real-time summaries of clinical options and responding dynamically to patient queries. 
This capability allows HCPs to allocate consultation time to value-based discussions rather than manual data synthesis, enhancing clinical efficiency without compromising decision quality.</p><p>Patient heterogeneity in clinical priorities and outcome preferences necessitates personalized communication. Generative AI enables interactive dialogue that adapts to individual concerns. For example, it can restructure treatment comparisons to emphasize nonsurgical alternatives when patients express concerns about operative interventions or highlight specific risks and benefits relevant to the patient&#x2019;s unique circumstances (eg, comorbidities). This responsive adaptation ensures explanations evolve according to articulated preferences, supporting truly patient-centered communication. To ensure consistency and interoperability, the output generated by AI reasoning systems could be grounded in standardized clinical terminologies, such as Systematized Nomenclature of Medicine Clinical Terms (SNOMED CT). SNOMED CT provides a comprehensive, computer-processable vocabulary for clinical terms used in EHRs globally. Aligning AI-generated explanations with SNOMED CT could help ensure the terminology used is consistent with the patient&#x2019;s record and potentially compatible with existing structured decision support tools or clinical information systems.</p></sec><sec id="s3-3"><title>AI Reasoning Versus Explainability in SDM</title><p>In clinical decision support, AI reasoning aims to deliver tailored rationales specific to a patient&#x2019;s context and values, going beyond technical transparency. Conventional explainability methods, such as feature-importance plots or probability distributions, may reveal how a model arrives at its outputs, yet rarely clarify why a recommendation is meaningful for this patient. 
By contrast, AI reasoning situates those outputs within clinical logic and patient priorities, generating user-friendly justifications that directly facilitate SDM conversations. In this way, generative AI can transform raw model outputs into narrative explanations relevant to each patient&#x2019;s unique goals, thus enabling a richer, more interactive exchange than code-level transparency can provide.</p><p>The value of AI in SDM lies not in technical transparency but in delivering clear, relevant, and actionable explanations that support informed decision-making. Generative AI enhances this process by enabling real-time refinement of reasoning based on HCP modifications and patient queries. This dynamic responsiveness allows the system to restructure explanations according to evolving priorities, for instance, shifting focus when patients express preferences regarding quality versus length of life, or adjusting the complexity based on literacy needs. Human-level AI reasoning, augmented by generative AI&#x2019;s capacity to produce adaptive, context-aware explanations, surpasses abstract explainability in clinical relevance and utility, directly supporting the fundamental objectives of SDM in contemporary health care practice.</p></sec></sec><sec id="s4"><title>The Intersection of AI Reasoning and SDM</title><sec id="s4-1"><title>Overlapping Elements of AI Reasoning and SDM</title><p>For AI to effectively support SDM, its reasoning processes must align with the communicative and deliberative nature of HCP-patient interactions. Both AI reasoning and SDM inherently demand clarity, transparency, justification, and personalization. For instance, when an AI provides clinically aligned logic, it directly supports the information exchange step by framing recommendations in medical terms that HCPs can relay and discuss with patients. Transparent recommendations facilitate the deliberation phase by clearly presenting options alongside their respective pros and cons. 
Similarly, a clear justification for AI-generated outputs bolsters the decision-making step, providing concrete, evidence-based rationales. Additionally, AI adaptability to individual patient contexts, values, and literacy levels emulates the tailored communication essential for effective SDM. An AI system capable of communicating through clinical reasoning can seamlessly integrate into the SDM dialogue. In contrast, an AI that provides only raw recommendations without explanations offers limited value in collaborative clinical interactions (<xref ref-type="table" rid="table2">Table 2</xref>).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Overlap between artificial intelligence (AI) reasoning components and shared decision-making (SDM) process steps.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">AI reasoning component</td><td align="left" valign="bottom">SDM component</td><td align="left" valign="bottom">Overlap</td></tr></thead><tbody><tr><td align="left" valign="top">Clinically aligned logic</td><td align="left" valign="top">Information exchange</td><td align="left" valign="top">AI must explain decisions in terms of medical reasoning HCPs<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> can share with patients.</td></tr><tr><td align="left" valign="top">Transparent recommendations</td><td align="left" valign="top">Deliberation</td><td align="left" valign="top">AI reasoning should present options openly, helping patients and doctors compare choices.</td></tr><tr><td align="left" valign="top">Justification of AI outputs</td><td align="left" valign="top">Decision-making</td><td align="left" valign="top">AI should provide clear rationale (&#x201C;why&#x201D;) to support the chosen option.</td></tr><tr><td align="left" valign="top">Adaptability to patient context</td><td align="left" valign="top">Tailored communication</td><td align="left" valign="top">AI should adjust 
its explanations to the individual patient&#x2019;s needs and values.</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>HCP: health care professional.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s4-2"><title>What SDM Lacks Without AI Reasoning</title><p>When AI reasoning is absent, HCPs and patients are left with raw scores or black-box outputs that fail to address individual preferences and concerns. Moreover, merely disclosing the technical details of a system&#x2019;s predictions does not sufficiently enable patients to evaluate personal trade-offs. Similarly, it does not help them understand how a recommendation aligns with their health objectives. Consequently, lacking coherent, patient-centered logic, these AI suggestions may appear arbitrary, eroding trust and undermining SDM&#x2019;s commitment to collaborative, value-sensitive decision-making. Ultimately, advice that lacks contextual reasoning, which both the HCP and patient can discuss meaningfully, turns into top-down instructions. Thus, this approach limits the opportunity for a shared dialogue.</p></sec><sec id="s4-3"><title>Bridging AI Reasoning and SDM: Toward AI-SDM</title><p>Bridging the gap between AI capabilities and SDM needs requires a shift toward a new paradigm: AI-SDM. This model emphasizes practical integration and technical feasibility in real-world care. AI-generated explanations must be tailored to the clinical context [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. Just as experienced HCPs adjust communication to different scenarios and patient profiles, AI systems should generate context-sensitive justifications. These must reflect clinical reasoning and align with patient values. 
For example, in chemotherapy decisions, AI reasoning should emphasize expected efficacy based on tumor type, potential side effects, and survival projections&#x2014;framed according to the patient&#x2019;s values, such as prioritizing quality of life over longevity. In chronic disease management, such as lifestyle interventions, explanations may instead highlight long-term risk reduction and adherence support. Tailoring AI reasoning to clinical context ensures its explanations are both relevant and usable [<xref ref-type="bibr" rid="ref30">30</xref>].</p><p>Integrating AI-SDM into clinical practice requires alignment with existing health IT infrastructure. A feasible workflow might involve the AI-SDM system being triggered within the electronic health record (EHR) during a patient encounter. The system could leverage modern interoperability standards, such as Health Level Seven International Fast Health care Interoperability Resources application programming interfaces, to interface with the EHR [<xref ref-type="bibr" rid="ref31">31</xref>]. These standards enable secure retrieval of up-to-date patient data, including diagnoses, medications, lab results, and problem lists coded using SNOMED CT [<xref ref-type="bibr" rid="ref32">32</xref>]. Predictive AI components would then analyze this data to generate context-specific risk assessments, outcome probabilities, or treatment comparisons based on established models and guidelines. Subsequently, a generative AI component would synthesize these complex outputs into patient-friendly language, creating tailored explanations, summaries, and potentially visual aids. These can be presented directly within the EHR interface for the HCP and patient to review and discuss together.</p><p>Successful adoption hinges not only on technical integration but also on stakeholder readiness. Key hurdles include ensuring robust IT infrastructure and establishing privacy-compliant data governance protocols. 
Providing adequate training for HCPs is also key. This helps them effectively use and critically appraise AI outputs within the SDM context. Strong leadership and organizational commitment are essential to address these challenges, supporting integration and promoting AI as a collaborative tool. This tool enhances, rather than replaces, clinical judgment and patient partnership. Such a standards-based foundation is a prerequisite for reliable data retrieval, consistent interpretation, and effective AI-SDM deployment across diverse clinical settings and platforms.</p></sec></sec><sec id="s5"><title>Defining AI-SDM: A New Conceptual Model</title><sec id="s5-1"><title>Theoretical Foundations of AI-SDM</title><p>Dual-process theory of clinical cognition proposes that clinicians alternate between fast, intuitive pattern recognition (system 1) and slower, analytical reasoning (system 2) when diagnosing and selecting treatments [<xref ref-type="bibr" rid="ref33">33</xref>]. AI-SDM mirrors this architecture by pairing predictive and recommendation models, which emulate System 2&#x2019;s probabilistic deliberation, with a generative reasoning layer that approximates System 1&#x2019;s narrative synthesis. This pairing enables the framework to deliver quantitative risk estimates while simultaneously providing context-sensitive justifications that fuel real-time dialogue. The model is further anchored in the Ottawa Decision Support Framework, which conceptualizes SDM as a sequence of need identification, values clarification, and decision support [<xref ref-type="bibr" rid="ref34">34</xref>]. By embedding adaptive values-clarification prompts within the generative layer, AI-SDM operationalizes these stages and ensures that explanations evolve in response to patient priorities. 
Principles of patient-centered communication likewise inform system design: explanations are calibrated to individual literacy, emotional state, and cultural context to preserve relational autonomy and encourage bidirectional questioning [<xref ref-type="bibr" rid="ref35">35</xref>]. Empirical evidence shows that decision aids incorporating tailored narratives and explicit values clarification improve decisional quality and patient trust, particularly when powered by AI-driven reasoning engines that maintain transparency and contestability [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. Synthesizing these theoretical strands positions AI-SDM not merely as a technological overlay but as a cognitive and communicative scaffold that aligns algorithmic inference with the epistemic norms of evidence-based, patient-centered care.</p></sec><sec id="s5-2"><title>What Is AI-SDM?</title><p>AI-SDM is a comprehensive, multimodel conceptual framework developed to incorporate AI-driven reasoning into clinical decision-making. It explicitly ensures HCP oversight and preserves patient autonomy. Unlike conventional AI-based decision-support tools that often focus solely on algorithmic outputs or technical explainability, AI-SDM introduces a collaborative reasoning approach. It enables real-time interaction and deliberation among HCPs, patients, and AI-generated insights. AI-SDM is built upon a multilayered AI system where different AI models contribute distinct functionalities: predictive AI performs risk stratification and outcome modeling; recommendation AI retrieves evidence-based guidelines and treatment options; natural language processing (NLP) AI extracts relevant data from clinical records; and generative AI functions as the crucial reasoning facilitator, transforming complex, structured AI outputs into interactive, patient-specific explanations. 
Through this synergistic integration, AI-SDM ensures that AI remains an adaptive and justifiable tool. It allows HCPs and patients to engage in structured deliberation while preserving the core principles of SDM.</p><p>AI-SDM builds on advances from sophisticated clinical decision support systems and incorporates Human-Computer Interaction principles for usability. It distinguishes itself fundamentally by its primary goal. That is to generate adaptive, narrative clinical reasoning specifically designed to facilitate the triadic deliberation (HCP-patient-AI) inherent in the SDM process. It shifts the focus from mere prediction or transparency toward context-rich, personalized justifications that clinicians can explore, modify, and communicate in natural language. While <xref ref-type="table" rid="table1">Table 1</xref> compared technical forms of AI interpretation, <xref ref-type="table" rid="table3">Table 3</xref> expands the comparison to full clinical decision frameworks, contrasting how SDM, XAI, and AI-SDM function at the bedside.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Comparison of traditional shared decision-making (SDM), artificial intelligence explainability (XAI), and artificial intelligence&#x2013;supported shared decision-making (AI-SDM) framework.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Dimension</td><td align="left" valign="bottom">Traditional SDM</td><td align="left" valign="bottom">XAI</td><td align="left" valign="bottom">AI-SDM framework (proposed)</td></tr></thead><tbody><tr><td align="left" valign="top">Purpose</td><td align="left" valign="top">Aligning decisions with patient values</td><td align="left" valign="top">Explain algorithm outputs</td><td align="left" valign="top">Generate contextual and patient-specific reasoning</td></tr><tr><td align="left" valign="top">Output format</td><td align="left" valign="top">Human dialogue and evidence 
summaries</td><td align="left" valign="top">SHAP<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> values, LIME<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup>, and saliency maps</td><td align="left" valign="top">Adaptive narrative, visual, and verbal reasoning</td></tr><tr><td align="left" valign="top">Workflow integration</td><td align="left" valign="top">Manual and time-intensive</td><td align="left" valign="top">External to workflow</td><td align="left" valign="top">Embedded within clinical encounter workflow</td></tr><tr><td align="left" valign="top">Personalization</td><td align="left" valign="top">Based on clinician skill/time</td><td align="left" valign="top">Minimal; generalized models</td><td align="left" valign="top">High; tailored to clinical context and patient data</td></tr><tr><td align="left" valign="top">Patient role</td><td align="left" valign="top">Dialogue partner</td><td align="left" valign="top">Passive receiver</td><td align="left" valign="top">Active participant in AI-driven<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup> deliberation</td></tr><tr><td align="left" valign="top">Clinician role</td><td align="left" valign="top">Central guide</td><td align="left" valign="top">Interpreter of AI outputs</td><td align="left" valign="top">Deliberation lead, with modifiable AI input</td></tr><tr><td align="left" valign="top">Use of AI</td><td align="left" valign="top">None</td><td align="left" valign="top">Explanatory only</td><td align="left" valign="top">Multimodel: predictive, generative, NLP<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup>, and recommendation</td></tr><tr><td align="left" valign="top">Transparency</td><td align="left" valign="top">Human-led discussion</td><td align="left" valign="top">Technical interpretability</td><td align="left" valign="top">Justifiable clinical reasoning in natural language</td></tr><tr><td align="left" valign="top">Limitations</td><td align="left" 
valign="top">Time, consistency, and cognitive load</td><td align="left" valign="top">Low usability in clinical conversations</td><td align="left" valign="top">Dependent on quality of AI design and integration</td></tr><tr><td align="left" valign="top">Example scenario</td><td align="left" valign="top">Stroke decision made via verbal counseling</td><td align="left" valign="top">Feature weights for &#x201C;recommend thrombectomy&#x201D;</td><td align="left" valign="top">Narrative of options, risks, and priorities generated in-session</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>SHAP: Shapley Additive Explanation.</p></fn><fn id="table3fn2"><p><sup>b</sup>LIME: Local Interpretable Model-Agnostic Explanations.</p></fn><fn id="table3fn3"><p><sup>c</sup>AI: artificial intelligence.</p></fn><fn id="table3fn4"><p><sup>d</sup>NLP: natural language processing.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s5-3"><title>AI-SDM Workflow and Multimodel AI Integration</title><p>AI-SDM operates through 4 integrated phases. Each phase leverages specialized AI models while preserving HCP oversight and patient autonomy (<xref ref-type="fig" rid="figure1">Figure 1</xref>).</p><p>The decision process begins with structured data acquisition. This involves gathering information from 3 essential sources: HCP-provided medical history and diagnostic considerations; patient-articulated values, goals, and risk preferences; and AI-derived evidence from clinical guidelines and research findings. During this phase, 3 specific AI functions are used. Predictive AI performs personalized risk assessment and outcomes analysis. Recommendation AI determines evidence-based treatment paths. Additionally, NLP with LLMs extracts unstructured data from health records and literature.</p><p>Following data integration, AI-SDM synthesizes statistical models, clinical best practices, and individual patient characteristics into a structured decision model. 
This model then generates 2 distinct outputs. First, HCPs receive a comprehensive, evidence-based report detailing risk-adjusted treatment pathways, complete with probability estimates and confidence intervals. Second, patients receive an interactive explanation, which may include visual aids, tailored to their understanding. Generative AI plays a crucial role by transforming these structured outputs into context-specific explanations. These explanations are adaptable to user engagement, thereby surpassing the limitations of static AI summaries.</p><p>An important conceptual consideration in this multimodel integration is the potential for conflicting or inconsistent outputs. Such conflicts can arise between the predictive, recommendation, and NLP components. For example, a high predicted risk from the predictive model might conflict with a standard guideline recommendation from the recommendation module. To address these conflicts, the AI-SDM framework incorporates a dedicated reconciliation layer. This layer automatically applies a clinically prioritized weighting mechanism. If a conflict occurs, the system assigns greater weight to validated risk factors while flagging any unresolved discrepancies for HCP review. This process ensures full transparency regarding potential ambiguities within the underlying data. Moreover, it maintains a robust foundation that supports subsequent generative AI reasoning. This ensures both transparency and auditability of any data ambiguities.</p><p>A central innovation in the AI-SDM workflow involves converting structured algorithmic output into adaptive, human-centered reasoning. Instead of static recommendations, generative AI produces dynamic, context-sensitive explanations that evolve based on HCP and patient interaction. These explanations are explicitly grounded in the underlying evidence and are safeguarded against potential biases or hallucinations (details of this implementation are beyond the scope of this paper). 
The AI component is designed for adaptability in both content and timing. It can, for instance, provide concise, rapid summaries for acute scenarios or more detailed rationales for planned consultations. The AI delivers reasoning, rather than merely outcomes, through dual channels tailored specifically to HCP and patient needs. This transforms the AI from a data synthesizer into a deliberation partner, supporting more justifiable clinical decisions.</p><p>The AI-SDM model facilitates real-time modification of AI-generated reasoning through continuous HCP evaluation and patient engagement. HCPs can adjust recommendations based on their expertise and contextual factors that extend beyond algorithmic reach. Simultaneously, patients can interrogate specific risks and refine their preferences. In response to these inputs, generative AI dynamically updates explanations. This iterative adaptation process ensures continuous alignment with both clinical judgment and evolving patient priorities.</p><p>The culmination of this process is a human-controlled, AI-assisted decision that aligns clinical evidence with patient values. AI-enhanced documentation captures the deliberative process, preserving transparency and accountability in medical records. The system can then generate personalized educational materials to support treatment adherence and follow-up strategies, ensuring continuity of care beyond the initial decision point.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>AI-supported SDM conceptual model: a structured, multiphase workflow for integrating AI-generated reasoning into SDM. The model begins with input collection from health care professionals (HCPs; medical context), patients (values, preferences), and AI-derived sources (clinical data and guidelines). Core AI functions, predictive modeling, clinical recommendation, and NLP support contextual risk stratification and evidence synthesis. 
Generative AI then produces adaptive, human-centered explanations tailored separately for HCPs and patients. The system supports real-time refinement of reasoning through HCP adjustments and patient queries, culminating in a human-controlled shared decision and follow-up planning. Color key: blue boxes within the diagram indicate processes or stages that generate multiple distinct outputs or lead to multiple subsequent steps in the workflow; pink boxes represent processes or outputs that are directly driven or generated by AI components. AI: artificial intelligence; CDSS: clinical decision support system; EHR: electronic health record; NLP: natural language processing; SDM: shared decision-making.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v4i1e75866_fig01.png"/></fig></sec><sec id="s5-4"><title>Hypothetical Application: AI-SDM in Stroke Management</title><sec id="s5-4-1"><title>Overview</title><p>The decision to perform mechanical thrombectomy or pursue medical therapy in elderly patients with acute ischemic stroke presents a complex, high-risk clinical scenario requiring rapid yet nuanced deliberation. While thrombectomy significantly improves functional outcomes in patients with large-vessel occlusion, older adults face unique challenges such as increased procedural risks, pre-existing comorbidities, and varied rehabilitation potential [<xref ref-type="bibr" rid="ref38">38</xref>]. AI-SDM enhances this decision-making process by integrating predictive modeling, evidence-based recommendations, NLP for context extraction, and generative AI to facilitate structured, adaptive reasoning.</p></sec><sec id="s5-4-2"><title>Scenario</title><p>A patient, aged 82 years, presents with an acute ischemic stroke due to an occlusion of the middle cerebral artery. 
Neuroimaging confirms a substantial penumbral salvageable region with a small infarct core, indicating potential eligibility for thrombectomy based on current criteria [<xref ref-type="bibr" rid="ref39">39</xref>]. However, the patient has a history of hypertension, mild cognitive impairment, and prior minor strokes, all of which influence the potential for meaningful neurological recovery and postprocedure rehabilitation. The AI-SDM workflow guides the decision-making process by structuring the evaluation into distinct phases, ensuring that clinicians and patients engage in a transparent and data-driven discussion.</p></sec><sec id="s5-4-3"><title>Phase 1: Input and Context Collection</title><p>This phase initiates the process by consolidating patient, clinician, and AI-derived inputs. The clinician provides an assessment of the patient&#x2019;s neurological status, prestroke function, and imaging results, while the patient and family articulate treatment priorities (eg, maximizing independence) and risk tolerance. AI synthesizes these inputs through distinct subcomponents: predictive AI generates probability-adjusted functional outcome estimates (eg, modified Rankin Scale scores) based on real-world stroke registries and thrombectomy trials [<xref ref-type="bibr" rid="ref40">40</xref>]; recommendation AI retrieves current stroke management guidelines. Additionally, NLP integrated with LLM extracts relevant historical data from the patient&#x2019;s records, such as identifying and categorizing symptoms, diagnoses, and treatment plans, which helps clinicians make informed decisions [<xref ref-type="bibr" rid="ref41">41</xref>]. This comprehensive dataset serves as the foundation for AI-generated reasoning.</p></sec><sec id="s5-4-4"><title>Phase 2: AI Reasoning Generation</title><p>Here, AI-SDM integrates structured insights into a clinical model. It facilitates individualized decision support. 
The AI synthesizes statistical models predicting outcomes with or without thrombectomy. It incorporates clinical best practices based on guideline recommendations. It also includes patient-specific variables such as age, comorbidities, and imaging findings. These are combined into a structured analysis adapted for clinicians and patient needs. Drawing from studies such as DAWN and DEFUSE-3, the system provides outcome and risk projections [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. Based on such studies, it may show that thrombectomy increases the likelihood of independence, for example, from 25% to 50%. It may also show a 10% chance of symptomatic intracerebral hemorrhage. The clinician&#x2019;s view presents a quantitative comparison of 90-day functional outcomes. For patients, generative AI transforms these insights into a simplified, interactive format. It presents recovery trajectories and risks using visual aids and clear language.</p></sec><sec id="s5-4-5"><title>Phase 3: Interactive Clinician-Patient Deliberation</title><p>This phase enables real-time clinician-patient engagement with the AI-generated insights via a dedicated interface supporting both voice and text-based interactions. The patient might query the AI about expected recovery timelines or independence, prompting generative AI to adjust explanations using refined predictive models. The interface simultaneously displays the original and updated outputs side by side, enabling the clinician to review, modify, and discuss these results with the patient. By recalibrating the risk-benefit summary in response to each query, the system keeps every explanation grounded in evidence-based data. This process occurs within a structured deliberation framework where AI is a support, not a decision maker. Because these updates happen in near real time, clinicians and patients remain actively involved in refining the decision until they reach a fully informed consensus. 
Patient feedback is integrated, and clinicians may review and adjust AI reasoning accordingly. This iterative loop allows both parties to deepen their understanding before reaching a decision.</p></sec><sec id="s5-4-6"><title>Phase 4: Shared Decision Implementation and Documentation</title><p>The process concludes with the clinician and patient reaching a shared decision informed by the AI-assisted deliberation. In this scenario, the patient, having engaged with the structured reasoning, opts for mechanical thrombectomy after weighing the potential benefits against the articulated risks. AI then facilitates implementation by generating structured documentation of the decision rationale for the medical record, ensuring transparency. The shared decision is fully clinician- and patient-controlled, with AI strictly supporting the process. Generative AI can also assist in drafting personalized postprocedure care recommendations, outlining rehabilitation expectations, and follow-up plans. The system continues to support follow-up planning and adaptation, ensuring the implementation remains aligned with patient needs. Throughout, the AI acts as a facilitator, ensuring the decision is guided by evidence and patient values under clinician oversight. <xref ref-type="table" rid="table4">Table 4</xref> summarizes these 4 phases using the acute ischemic stroke scenario.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Summary of artificial intelligence&#x2013;supported shared decision-making (AI-SDM) phases in the stroke scenario.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">AI-SDM phase</td><td align="left" valign="bottom">Application in acute stroke scenario example</td></tr></thead><tbody><tr><td align="left" valign="top">Input and context collection</td><td align="left" valign="top">Patient aged 82 years with MCA<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup> occlusion. 
Imaging shows salvageable penumbra and small infarct core. Clinician assesses neurological status, prestroke function, and imaging. Patient/family expresses independence goals and risk tolerance. Predictive AI<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup> generates probability-adjusted functional outcome estimates from stroke registries and thrombectomy trials. Recommendation AI retrieves current stroke management guidelines. NLP<sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup>+LLM<sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup> extracts relevant historical data, including symptoms, diagnoses, and treatment plans.</td></tr><tr><td align="left" valign="top">AI reasoning generation</td><td align="left" valign="top">AI integrates predictions, guidelines, and patient variables into structured analysis. Based on DAWN/DEFUSE-3, it estimates outcomes (eg, 25%&#x2010;50% independence, 10% hemorrhage risk). Clinician&#x2019;s view presents a quantitative comparison of 90-day outcomes. Generative AI presents simplified, interactive patient explanations using visual aids and clear language.</td></tr><tr><td align="left" valign="top">Interactive decision refinement</td><td align="left" valign="top">Patient queries recovery timelines or independence. Clinician adjusts AI outputs based on rehab and support. Occurs within a structured deliberation framework where AI is a support tool. Generative AI updates reasoning dynamically. Patient feedback is integrated. Clinicians may review and adjust AI reasoning.</td></tr><tr><td align="left" valign="top">Final decision and implementation</td><td align="left" valign="top">Shared decision made after AI-assisted deliberation. Patient selects thrombectomy. AI documents rationale and generates personalized postprocedure recommendations, including rehab expectations and follow-up. System supports ongoing adaptation. 
The decision is fully clinician- and patient-controlled.</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>MCA: middle cerebral artery.</p></fn><fn id="table4fn2"><p><sup>b</sup>AI: artificial intelligence.</p></fn><fn id="table4fn3"><p><sup>c</sup>NLP: natural language processing.</p></fn><fn id="table4fn4"><p><sup>d</sup>LLM: large language model.</p></fn></table-wrap-foot></table-wrap><p>Through this structured AI-SDM approach, complex stroke treatment decisions can remain data-driven, transparent, and patient-centered, leveraging advanced analytics and adaptive explanations within a collaborative framework.</p><p>While the stroke scenario illustrates AI-SDM in an acute, time-sensitive setting, the framework&#x2019;s principles also apply to complex, preference-sensitive decisions in chronic disease management. In advanced chronic kidney disease, particularly among older adults, patients often face substantial burdens and uncertain benefits from dialysis and may remain uninformed about conservative kidney management as a treatment choice [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. Likewise, in cardiology, decisions such as whether to pursue left atrial appendage occlusion instead of long-term anticoagulation for atrial fibrillation, or how to manage advanced heart failure in line with patient goals, frequently require nuanced SDM discussions [<xref ref-type="bibr" rid="ref46">46</xref>]. AI-SDM can help address these challenges by integrating longitudinal data, evidence-based predictions, and patient-reported outcomes, thus facilitating more individualized deliberation around what matters most to each patient over the course of their illness trajectory.</p></sec></sec><sec id="s5-5"><title>Ensuring AI-SDM Preserves Patient Autonomy</title><p>A fundamental requirement for AI-SDM is that it must safeguard patient autonomy and uphold the ethos of SDM at every step. 
To this end, the model is designed such that the AI&#x2019;s outputs are always transparent, open to question, and subordinate to human input. Both the HCP and the patient should be empowered to challenge or adjust the AI&#x2019;s suggestions freely. For example, if the AI&#x2019;s analysis seems to favor a particular treatment strongly, the patient can ask for clarification or express discomfort, and the HCP can probe the AI&#x2019;s reasoning for validity&#x2014;in both cases, the AI must accommodate these challenges by explaining its rationale or recalibrating its advice. This contestability is deliberate: the AI is not a black box oracle handing down decisions, but a tool that invites scrutiny. Transparency is crucial here; the AI-SDM system should clearly communicate why it is highlighting certain options (eg, &#x201C;Option A is supported by X study for patients with your profile&#x201D;) so that the human participants can critically evaluate the reasoning. By avoiding opaque or one-sided recommendations, the AI prevents any undue influence or bias that could pressure the patient. In practice, this means AI-SDM will present multiple options with evidence rather than a singular &#x201C;do this&#x201D; directive, and it will explicitly incorporate the patient&#x2019;s own goals into its analysis. The HCP retains ultimate responsibility to interpret and, if necessary, correct the AI&#x2019;s output before any action. In sum, AI-SDM is constructed as a facilitator, not a decision maker: it expands the information and reasoning available to the patient and HCP, but it never replaces their agency. 
The patient&#x2019;s values and the HCP&#x2019;s professional judgment remain at the center of every decision, thereby preserving the autonomy and individualized nature of care.</p></sec></sec><sec id="s6"><title>Challenges and Future Directions</title><p>The successful integration of AI into SDM requires proactively addressing critical implementation barriers to ensure clinical uptake, effectiveness, and ethical deployment.</p><sec id="s6-1"><title>HCP Adoption and Trust</title><p>Adoption hinges on transparent, interpretable AI systems that avoid &#x201C;black box&#x201D; functionality. In AI-SDM, generative AI transforms complex algorithmic outputs into verifiable explanations with clear references to clinical guidelines and explicit confidence levels. Implementation requires structured reasoning pathways that allow HCPs to interrogate AI-derived conclusions and understand their evidentiary basis, particularly when recommendations diverge from conventional practice.</p></sec><sec id="s6-2"><title>Regulatory Landscape and Liability</title><p>Navigating the evolving regulatory frameworks for AI-assisted clinical decision-making is crucial. AI-SDM systems, particularly those providing diagnostic or therapeutic recommendations, would likely be considered software as a medical device and need to align with guidelines from regulatory bodies like the US Food and Drug Administration or equivalent authorities globally. Key considerations include rigorous validation, demonstrating safety and effectiveness, ensuring transparency (allowing HCPs to independently review the basis for recommendations), and implementing robust quality management systems, including postmarket surveillance. 
While AI-SDM preserves human oversight by positioning AI as decision support rather than the ultimate decision maker, clear governance policies are needed to delineate responsibility among developers, HCPs, and health care institutions, especially concerning liability if AI suggestions deviate from the standard of care.</p></sec><sec id="s6-3"><title>Ethical Considerations and Equity</title><p>AI-SDM must be implemented ethically, safeguarding patient rights and promoting equity. This includes strict adherence to data privacy regulations pertinent to health information, such as the principles outlined in the Health Insurance Portability and Accountability Act in the United States or the General Data Protection Regulation in Europe, as well as relevant national or local regulations (eg, in Saudi Arabia). Systems must be designed to accommodate diverse health literacy levels, cultural contexts, and cognitive abilities, and generative AI interfaces should dynamically adjust explanation complexity based on individual needs while preserving clinical accuracy. Furthermore, proactive measures are essential to address potential algorithmic biases, which could arise from training data used in the predictive or recommendation models. To prevent AI hallucinations and preserve clinical integrity, each generative output is anchored to explicit citations from validated guidelines or peer-reviewed studies. An automated audit protocol continuously monitors real-time outputs for discrepancies, flagging any deviations from established evidence standards so that HCPs can rapidly override or adjust the AI&#x2019;s recommendations. 
This includes rigorous auditing of the underlying predictive and recommendation models for fairness across demographic groups and designing the generative AI reasoning layer to explicitly surface significant uncertainties or conflicting evidence that might stem from data limitations or potential biases.</p><p>Building on these safeguards, future deployments will institute a 4-layer governance loop for continuous bias mitigation. First, training pipelines will use fairness-aware algorithms&#x2014;such as reweighting and equalized-odds postprocessing&#x2014;to correct calibration disparities before clinical deployment, an approach recommended by Rajkomar et al [<xref ref-type="bibr" rid="ref47">47</xref>] for advancing health equity in machine-learning systems. Second, the production environment will stream model outputs into a real-time dashboard that audits performance by age, sex, ethnicity, and socioeconomic status; similar bias-auditing infrastructures have been shown to reveal hidden performance gaps in widely used clinical algorithms [<xref ref-type="bibr" rid="ref48">48</xref>]. Third, quarterly ethical-compliance reviews will examine data provenance, feature attribution, and workflow impact to maintain regulatory alignment, and finally, all bias metrics and remediation actions will be logged in a version-controlled registry to support external audit and public transparency. Together, these stages create an auditable feedback loop that limits drift, documents remediation, and embeds fairness governance directly into routine system maintenance.</p><p>The sociotechnical impact of AI-SDM also depends on how clinicians and patients adopt, negotiate, and contest its recommendations. 
Rogers&#x2019; Diffusion of Innovations theory explains variability in uptake by highlighting perceived complexity, relative advantage, and trialability, whereas technological-determinist perspectives warn that overly authoritative AI may erode clinician agency, and social-constructivist analyses emphasize that users actively reshape technology through practice [<xref ref-type="bibr" rid="ref49">49</xref>]. To preserve balanced doctor-patient dynamics, AI-SDM therefore labels the scope and limitations of every recommendation, requires explicit clinician confirmation before any automated action, and provides a &#x201C;why-question&#x201D; interface so both parties can interrogate underlying evidence or override suggestions. Empirical work on person-centered AI indicates that transparent, assistive designs strengthen trust when clinicians retain control, while unmoderated reliance can attenuate empathy and SDM [<xref ref-type="bibr" rid="ref50">50</xref>]. Embedding these sociological insights into interface rules and governance policies anchors AI-SDM in relational autonomy and guards against power imbalances.</p></sec><sec id="s6-4"><title>Technical Integration and Workflow</title><p>The clinical utility of AI-SDM depends on seamless integration with existing EHR systems and clinical workflows. Implementation requires user-friendly interfaces that generate concise, contextually relevant insights without increasing cognitive burden or documentation requirements for HCPs. However, seamlessly embedding this potentially complex, multistep interaction, particularly the deliberative refinement phase, into time-constrained and varied clinical workflows represents a significant practical and design hurdle. Achieving this without disrupting clinical practice or unduly lengthening consultations will be critical for successful adoption. 
As discussed earlier (in section &#x201C;Bridging AI Reasoning and SDM: Toward AI-SDM&#x201D;), leveraging interoperability standards like Fast Healthcare Interoperability Resources and terminologies like SNOMED CT is vital. Ultimately, AI-SDM must demonstrate measurable improvements in decision quality, patient experience, or efficiency to justify the technological investment and workflow adjustments required for widespread adoption.</p><p>Addressing these multifaceted challenges necessitates an iterative implementation approach, combining continuous HCP and patient feedback with rigorous validation. Validating the efficacy and safety of the AI-SDM framework itself would require a phased approach, progressing from algorithmic validation of individual AI components and rigorous usability testing of the interface and explanation formats, through simulation studies assessing decision quality, to eventual pilot clinical trials evaluating real-world impacts on patient engagement, decision concordance, and outcomes. Successful deployment will ultimately depend on collaborative governance structures that balance technological innovation with clinical pragmatism, ethical principles, patient safety, and regulatory compliance.</p></sec><sec id="s6-5"><title>Future Directions</title><p>Realizing the potential of AI-SDM necessitates substantial future research and development. Key priorities include the rigorous development and refinement of the generative reasoning component, incorporating robust mechanisms for clinical validity, grounding, and bias mitigation, alongside effective strategies for reconciling outputs from disparate AI models. While this paper introduces AI-SDM as a conceptual framework, future work could explore empirical validation, such as usability studies, workflow simulations, or clinical implementation pilots, to assess its impact on decision quality, patient engagement, and workflow integration. 
Further research grounded in HCI principles may also inform how the model could integrate seamlessly into clinical environments without increasing HCP burden. Finally, ongoing investigation into dynamic fairness auditing, evolving regulatory pathways for AI-driven SDM tools, and establishing clear governance structures will be crucial for responsible and equitable deployment.</p><p>Building on the dual-process and patient-centered theories outlined above, future iterations of AI-SDM will deepen its affective intelligence by coupling multimodal emotion&#x2010;recognition pipelines with the existing generative explanation layer. Recent work demonstrates that equipping decision-support systems with emotional capabilities can reduce affective bias and improve user trust when complex trade-offs are discussed [<xref ref-type="bibr" rid="ref51">51</xref>]. To operationalize this insight, we plan to integrate a multimodal deep learning model that fuses facial microexpressions, vocal prosody, and lexical sentiment&#x2014;an approach shown to outperform unimodal affect detectors in health care contexts and to strengthen trust between patients and clinicians [<xref ref-type="bibr" rid="ref52">52</xref>]. Continuous emotion streams will inform dynamic values-clarification prompts generated by the narrative engine, ensuring that explanations adapt when signs of confusion, anxiety, or decisional conflict emerge. A recent systematic review of emotion-recognition AI identifies transparent feature attribution and dataset diversity as prerequisites for reliable affective computing in clinical environments; these requirements will guide our data-governance and model-validation strategy [<xref ref-type="bibr" rid="ref52">52</xref>]. Finally, evidence from a randomized trial of an AI-enabled decision aid shows that personalized, empathetic narratives significantly improve decisional quality and shared-decision metrics compared with static educational material. 
By embedding such adaptive affective feedback into AI-SDM, we not only enhance the emotional-computing module but also further align the framework with the Ottawa Decision Support and patient-centered communication theories that underpin its interdisciplinary foundation.</p></sec></sec><sec id="s7" sec-type="conclusions"><title>Conclusions</title><p>Integrating AI into clinical practice requires more than predictive accuracy; it demands alignment with patient-centered care principles like SDM. This paper introduces AI-SDM, a conceptual framework designed to bridge this gap. AI-SDM leverages predictive modeling, evidence synthesis, and generative AI to embed AI reasoning&#x2014;contextual, human-interpretable justifications&#x2014;directly into the SDM workflow, facilitating collaborative deliberation among HCPs, patients, and AI, ensuring insights are tailored. However, several limitations warrant attention, including the need for pilot studies to test real-world feasibility, clear protocols for reconciling conflicting model outputs, and safeguards against AI hallucinations. Immediate next steps will involve simulation-based validation and user-centered design iterations to refine how AI-SDM integrates with existing clinical workflows. While significant implementation challenges remain, including ethical considerations, regulatory alignment, and workflow integration, AI-SDM offers a promising pathway. By synergizing AI&#x2019;s analytical power with the personalized approach of SDM, this model can potentially enhance decision quality, foster patient autonomy, and advance evidence-based, patient-centered care in the era of intelligent health systems.</p></sec></body><back><fn-group><fn fn-type="con"><p>The study was conceptualized by MA (lead), with support from NF and HJ. The methodology and visualization were led by MA. MA also took the lead in writing the original draft of the manuscript. 
The review and editing of the manuscript were carried out by MA (lead), with contributions from NF and HJ. Project administration was also led by MA.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations:</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AI-SDM</term><def><p>artificial intelligence&#x2013;supported shared decision-making</p></def></def-item><def-item><term id="abb3">EHR</term><def><p>electronic health record</p></def></def-item><def-item><term id="abb4">HCP</term><def><p>health care professional</p></def></def-item><def-item><term id="abb5">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb6">NLP</term><def><p>natural language processing</p></def></def-item><def-item><term id="abb7">SDM</term><def><p>shared decision-making</p></def></def-item><def-item><term id="abb8">SNOMED CT</term><def><p>Systematized Nomenclature of Medicine Clinical Terms</p></def></def-item><def-item><term id="abb9">XAI</term><def><p>artificial intelligence explainability</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elwyn</surname><given-names>G</given-names> </name><name name-style="western"><surname>Frosch</surname><given-names>D</given-names> </name><name name-style="western"><surname>Thomson</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Shared decision making: a model for clinical practice</article-title><source>J Gen Intern Med</source><year>2012</year><month>10</month><volume>27</volume><issue>10</issue><fpage>1361</fpage><lpage>1367</lpage><pub-id pub-id-type="doi">10.1007/s11606-012-2077-6</pub-id><pub-id pub-id-type="medline">22618581</pub-id></nlm-citation></ref><ref 
id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kilbride</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Joffe</surname><given-names>S</given-names> </name></person-group><article-title>The new age of patient autonomy: implications for the patient-physician relationship</article-title><source>JAMA</source><year>2018</year><month>11</month><day>20</day><volume>320</volume><issue>19</issue><fpage>1973</fpage><lpage>1974</lpage><pub-id pub-id-type="doi">10.1001/jama.2018.14382</pub-id><pub-id pub-id-type="medline">30326026</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bajwa</surname><given-names>J</given-names> </name><name name-style="western"><surname>Munir</surname><given-names>U</given-names> </name><name name-style="western"><surname>Nori</surname><given-names>A</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>B</given-names> </name></person-group><article-title>Artificial intelligence in healthcare: transforming the practice of medicine</article-title><source>Future Healthc J</source><year>2021</year><month>07</month><volume>8</volume><issue>2</issue><fpage>e188</fpage><lpage>e194</lpage><pub-id pub-id-type="doi">10.7861/fhj.2021-0095</pub-id><pub-id pub-id-type="medline">34286183</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liang</surname><given-names>N</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Harnessing the power of clinical decision support systems: 
challenges and opportunities</article-title><source>Open Heart</source><year>2023</year><month>11</month><volume>10</volume><issue>2</issue><fpage>e002432</fpage><pub-id pub-id-type="doi">10.1136/openhrt-2023-002432</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dixon</surname><given-names>D</given-names> </name><name name-style="western"><surname>Sattar</surname><given-names>H</given-names> </name><name name-style="western"><surname>Moros</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Unveiling the influence of AI predictive analytics on patient outcomes: a comprehensive narrative review</article-title><source>Cureus</source><year>2024</year><volume>16</volume><issue>5</issue><pub-id pub-id-type="doi">10.7759/cureus.59954</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Vetter</surname><given-names>D</given-names> </name><name name-style="western"><surname>Blomberg</surname><given-names>SN</given-names> </name><etal/></person-group><article-title>To explain or not to explain?-artificial intelligence explainability in clinical decision support systems</article-title><source>PLOS Digit Health</source><year>2022</year><month>02</month><volume>1</volume><issue>2</issue><fpage>e0000016</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000016</pub-id><pub-id pub-id-type="medline">36812545</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abbasgholizadeh Rahimi</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Cwintal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Application of artificial intelligence in shared decision making: scoping review</article-title><source>JMIR Med Inform</source><year>2022</year><month>08</month><day>9</day><volume>10</volume><issue>8</issue><fpage>e36199</fpage><pub-id pub-id-type="doi">10.2196/36199</pub-id><pub-id pub-id-type="medline">35943793</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Leersum</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Maathuis</surname><given-names>C</given-names> </name></person-group><article-title>Human centred explainable AI decision-making in healthcare</article-title><source>J Responsible Technol</source><year>2025</year><month>03</month><volume>21</volume><fpage>100108</fpage><pub-id pub-id-type="doi">10.1016/j.jrt.2025.100108</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Bouderhem</surname><given-names>R</given-names> </name></person-group><article-title>A comprehensive framework for transparent and explainable AI sensors in healthcare</article-title><conf-name>The 11th International Electronic Conference on Sensors and Applications</conf-name><conf-date>Nov 26-28, 2024</conf-date><pub-id pub-id-type="doi">10.3390/ecsa-11-20524</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Petkovic</surname><given-names>D</given-names> </name></person-group><article-title>It is not &#x201C;Accuracy vs. 
Explainability&#x201D;&#x2014;we need both for trustworthy AI systems</article-title><source>IEEE Trans Technol Soc</source><year>2023</year><volume>4</volume><issue>1</issue><fpage>46</fpage><lpage>53</lpage><pub-id pub-id-type="doi">10.1109/TTS.2023.3239921</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sutton</surname><given-names>RT</given-names> </name><name name-style="western"><surname>Pincock</surname><given-names>D</given-names> </name><name name-style="western"><surname>Baumgart</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Sadowski</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Fedorak</surname><given-names>RN</given-names> </name><name name-style="western"><surname>Kroeker</surname><given-names>KI</given-names> </name></person-group><article-title>An overview of clinical decision support systems: benefits, risks, and strategies for success</article-title><source>NPJ Digit Med</source><year>2020</year><volume>3</volume><issue>1</issue><fpage>17</fpage><pub-id pub-id-type="doi">10.1038/s41746-020-0221-y</pub-id><pub-id pub-id-type="medline">32047862</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Emanuel</surname><given-names>EJ</given-names> </name></person-group><article-title>Predicting the future - big data, machine learning, and clinical medicine</article-title><source>N Engl J Med</source><year>2016</year><month>09</month><day>29</day><volume>375</volume><issue>13</issue><fpage>1216</fpage><lpage>1219</lpage><pub-id pub-id-type="doi">10.1056/NEJMp1606181</pub-id><pub-id pub-id-type="medline">27682033</pub-id></nlm-citation></ref><ref 
id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajkomar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dean</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kohane</surname><given-names>I</given-names> </name></person-group><article-title>Machine learning in medicine</article-title><source>N Engl J Med</source><year>2019</year><month>04</month><day>4</day><volume>380</volume><issue>14</issue><fpage>1347</fpage><lpage>1358</lpage><pub-id pub-id-type="doi">10.1056/NEJMra1814259</pub-id><pub-id pub-id-type="medline">30943338</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>ZZ</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Yao</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>From system 1 to system 2: a survey of reasoning large language models</article-title><source>arXiv</source><comment>Preprint posted online on  Feb 24, 2025</comment><pub-id pub-id-type="doi">10.48550/arXiv.2502.17419</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Patil</surname><given-names>A</given-names> </name><name name-style="western"><surname>Jadon</surname><given-names>A</given-names> </name></person-group><article-title>Advancing reasoning in large language models: promising 
methods and approaches</article-title><source>arXiv</source><comment>Preprint posted online on  May 28, 2025</comment><pub-id pub-id-type="doi">10.48550/arXiv.2502.03671</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Temsah</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Jamal</surname><given-names>A</given-names> </name><name name-style="western"><surname>Alhasan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Temsah</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Malki</surname><given-names>KH</given-names> </name></person-group><article-title>OpenAI o1-preview vs. ChatGPT in healthcare: a new frontier in medical AI reasoning</article-title><source>Cureus</source><year>2024</year><month>10</month><volume>16</volume><issue>10</issue><fpage>e70640</fpage><pub-id pub-id-type="doi">10.7759/cureus.70640</pub-id><pub-id pub-id-type="medline">39359332</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McIntosh</surname><given-names>TR</given-names> </name><name name-style="western"><surname>Susnjak</surname><given-names>T</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>T</given-names> </name><etal/></person-group><article-title>From Google Gemini to OpenAI Q* (Q-Star): a survey on reshaping the generative artificial intelligence (AI) research landscape</article-title><source>Technologies (Basel)</source><year>2025</year><volume>13</volume><issue>2</issue><fpage>51</fpage><pub-id pub-id-type="doi">10.3390/technologies13020051</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Almadani</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kaisar</surname><given-names>H</given-names> </name><name name-style="western"><surname>Thoker</surname><given-names>IR</given-names> </name><name name-style="western"><surname>Aliyu</surname><given-names>F</given-names> </name></person-group><article-title>A systematic survey of distributed decision support systems in healthcare</article-title><source>Systems</source><year>2025</year><volume>13</volume><issue>3</issue><fpage>157</fpage><pub-id pub-id-type="doi">10.3390/systems13030157</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Choudhury</surname><given-names>S</given-names> </name><name name-style="western"><surname>Agarwal</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ham</surname><given-names>C</given-names> </name><name name-style="western"><surname>Tamang</surname><given-names>S</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Tamang</surname><given-names>S</given-names> </name></person-group><source>MediSage: An Ai Assistant for Healthcare via Composition of Neural-Symbolic Reasoning Operators</source><year>2023</year><publisher-name>Association for Computing Machinery</publisher-name><pub-id pub-id-type="doi">10.1145/3543873.3587361</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Machot</surname><given-names>FA</given-names> </name><name name-style="western"><surname>Horsch</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Ullah</surname><given-names>H</given-names> </name></person-group><article-title>Symbolic-AI-fusion deep learning (SAIF-DL): encoding 
knowledge into training with answer set programming loss penalties by a novel loss function approach</article-title><source>arXiv</source><comment>Preprint posted online on  Nov 13, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2411.08463</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Garg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Parikh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Garg</surname><given-names>S</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Garg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Parikh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Garg</surname><given-names>S</given-names> </name></person-group><article-title>Navigating healthcare insights: a bird&#x2019;s eye view of explainability with knowledge graphs</article-title><conf-name>2023 IEEE Sixth International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)</conf-name><conf-date>Sep 25-27, 2023</conf-date><conf-loc>Laguna Hills, CA, USA</conf-loc><pub-id pub-id-type="doi">10.1109/AIKE59827.2023.00016</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khosravi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zare</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Mojtabaeian</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Izadi</surname><given-names>R</given-names> </name></person-group><article-title>Artificial intelligence and decision-making in healthcare: a thematic analysis of a systematic review of 
reviews</article-title><source>Health Serv Res Manag Epidemiol</source><year>2024</year><volume>11</volume><fpage>23333928241234863</fpage><pub-id pub-id-type="doi">10.1177/23333928241234863</pub-id><pub-id pub-id-type="medline">38449840</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Nguyen</surname><given-names>KN</given-names> </name><name name-style="western"><surname>Le-Duc</surname><given-names>K</given-names> </name><name name-style="western"><surname>Tat</surname><given-names>BP</given-names> </name><name name-style="western"><surname>Vo-Dang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hy</surname><given-names>TS</given-names> </name></person-group><article-title>Sentiment reasoning for healthcare</article-title><source>arXiv</source><comment>Preprint posted online on  May 27, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2407.21054</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beaubier</surname><given-names>N</given-names> </name><name name-style="western"><surname>Bontrager</surname><given-names>M</given-names> </name><name name-style="western"><surname>Huether</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Integrated genomic profiling expands clinical options for patients with cancer</article-title><source>Nat Biotechnol</source><year>2019</year><month>11</month><volume>37</volume><issue>11</issue><fpage>1351</fpage><lpage>1360</lpage><pub-id pub-id-type="doi">10.1038/s41587-019-0259-z</pub-id><pub-id pub-id-type="medline">31570899</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name 
name-style="western"><surname>Deliu</surname><given-names>N</given-names> </name><name name-style="western"><surname>Chakraborty</surname><given-names>B</given-names> </name></person-group><article-title>Artificial intelligence-based decision support systems for precision and digital health</article-title><source>arXiv</source><comment>Preprint posted online on  Jul 22, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2407.16062</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="web"><article-title>NIH findings shed light on risks and benefits of integrating AI into medical decision-making</article-title><source>National Institutes of Health</source><year>2024</year><access-date>2025-03-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.nih.gov/news-events/news-releases/nih-findings-shed-light-risks-benefits-integrating-ai-into-medical-decision-making">https://www.nih.gov/news-events/news-releases/nih-findings-shed-light-risks-benefits-integrating-ai-into-medical-decision-making</ext-link></comment></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajabi</surname><given-names>E</given-names> </name><name name-style="western"><surname>Kafaie</surname><given-names>S</given-names> </name></person-group><article-title>Knowledge graphs and explainable AI in healthcare</article-title><source>Information</source><year>2022</year><volume>13</volume><issue>10</issue><fpage>459</fpage><pub-id pub-id-type="doi">10.3390/info13100459</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>L&#x00E9;gar&#x00E9;</surname><given-names>F</given-names> </name><name name-style="western"><surname>O&#x2019;Connor</surname><given-names>AC</given-names> </name><name 
name-style="western"><surname>Graham</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Supporting patients facing difficult health care decisions: use of the Ottawa decision support framework</article-title><source>Can Fam Physician</source><year>2006</year><month>04</month><volume>52</volume><issue>4</issue><fpage>476</fpage><lpage>477</lpage><pub-id pub-id-type="medline">17327891</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Blasimme</surname><given-names>A</given-names> </name><name name-style="western"><surname>Vayena</surname><given-names>E</given-names> </name><name name-style="western"><surname>Frey</surname><given-names>D</given-names> </name><name name-style="western"><surname>Madai</surname><given-names>VI</given-names> </name><collab>Precise4Q consortium</collab></person-group><article-title>Explainability for artificial intelligence in healthcare: a multidisciplinary perspective</article-title><source>BMC Med Inform Decis Mak</source><year>2020</year><month>11</month><day>30</day><volume>20</volume><issue>1</issue><fpage>310</fpage><pub-id pub-id-type="doi">10.1186/s12911-020-01332-6</pub-id><pub-id pub-id-type="medline">33256715</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gerdes</surname><given-names>A</given-names> </name></person-group><article-title>The role of explainability in AI-supported medical decision-making</article-title><source>Discov Artif Intell</source><year>2024</year><volume>4</volume><issue>1</issue><fpage>29</fpage><pub-id pub-id-type="doi">10.1007/s44163-024-00119-2</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Borna</surname><given-names>S</given-names> </name><name name-style="western"><surname>Maniaci</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Haider</surname><given-names>CR</given-names> </name><etal/></person-group><article-title>Artificial intelligence models in health information exchange: a systematic review of clinical implications</article-title><source>Healthcare (Basel)</source><year>2023</year><month>09</month><day>19</day><volume>11</volume><issue>18</issue><fpage>2584</fpage><pub-id pub-id-type="doi">10.3390/healthcare11182584</pub-id><pub-id pub-id-type="medline">37761781</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chatterjee</surname><given-names>A</given-names> </name><name name-style="western"><surname>Pahari</surname><given-names>N</given-names> </name><name name-style="western"><surname>Prinz</surname><given-names>A</given-names> </name></person-group><article-title>HL7 FHIR with SNOMED-CT to achieve semantic and structural interoperability in personal health data: a proof-of-concept study</article-title><source>Sensors (Basel)</source><year>2022</year><month>05</month><day>15</day><volume>22</volume><issue>10</issue><fpage>3756</fpage><pub-id pub-id-type="doi">10.3390/s22103756</pub-id><pub-id pub-id-type="medline">35632165</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Croskerry</surname><given-names>P</given-names> </name></person-group><article-title>A universal model of diagnostic reasoning</article-title><source>Acad Med</source><year>2009</year><month>08</month><volume>84</volume><issue>8</issue><fpage>1022</fpage><lpage>1028</lpage><pub-id 
pub-id-type="doi">10.1097/ACM.0b013e3181ace703</pub-id><pub-id pub-id-type="medline">19638766</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hoefel</surname><given-names>L</given-names> </name><name name-style="western"><surname>Lewis</surname><given-names>KB</given-names> </name><name name-style="western"><surname>O&#x2019;Connor</surname><given-names>A</given-names> </name><name name-style="western"><surname>Stacey</surname><given-names>D</given-names> </name></person-group><article-title>20th anniversary update of the Ottawa decision support framework: part 2 subanalysis of a systematic review of patient decision aids</article-title><source>Med Decis Making</source><year>2020</year><month>05</month><volume>40</volume><issue>4</issue><fpage>522</fpage><lpage>539</lpage><pub-id pub-id-type="doi">10.1177/0272989X20924645</pub-id><pub-id pub-id-type="medline">32522091</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Epstein</surname><given-names>RM</given-names> </name><name name-style="western"><surname>Street</surname><given-names>RL</given-names> </name></person-group><article-title>The values and value of patient-centered care</article-title><source>Ann Fam Med</source><year>2011</year><volume>9</volume><issue>2</issue><fpage>100</fpage><lpage>103</lpage><pub-id pub-id-type="doi">10.1370/afm.1239</pub-id><pub-id pub-id-type="medline">21403134</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Witteman</surname><given-names>HO</given-names> </name><name name-style="western"><surname>Maki</surname><given-names>KG</given-names> </name><name 
name-style="western"><surname>Vaisson</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Systematic development of patient decision aids: an update from the IPDAS collaboration</article-title><source>Med Decis Making</source><year>2021</year><month>10</month><volume>41</volume><issue>7</issue><fpage>736</fpage><lpage>754</lpage><pub-id pub-id-type="doi">10.1177/0272989X211014163</pub-id><pub-id pub-id-type="medline">34148384</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jayakumar</surname><given-names>P</given-names> </name><name name-style="western"><surname>Moore</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Furlough</surname><given-names>KA</given-names> </name><etal/></person-group><article-title>Comparison of an artificial intelligence-enabled patient decision aid vs educational material on decision quality, shared decision-making, patient experience, and functional outcomes in adults with knee osteoarthritis: a randomized clinical trial</article-title><source>JAMA Netw Open</source><year>2021</year><month>02</month><day>1</day><volume>4</volume><issue>2</issue><fpage>e2037107</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2020.37107</pub-id><pub-id pub-id-type="medline">33599773</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Warner</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Harrington</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Sacco</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Elkind</surname><given-names>MSV</given-names> </name></person-group><article-title>Guidelines for the early management of patients with 
acute ischemic stroke: 2019 update to the 2018 guidelines for the early management of acute ischemic stroke</article-title><source>Stroke</source><year>2019</year><month>12</month><volume>50</volume><issue>12</issue><fpage>3331</fpage><lpage>3332</lpage><pub-id pub-id-type="doi">10.1161/STROKEAHA.119.027708</pub-id><pub-id pub-id-type="medline">31662117</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Albers</surname><given-names>GW</given-names> </name><name name-style="western"><surname>Marks</surname><given-names>MP</given-names> </name><name name-style="western"><surname>Kemp</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Thrombectomy for stroke at 6 to 16 hours with selection by perfusion imaging</article-title><source>N Engl J Med</source><year>2018</year><month>02</month><day>22</day><volume>378</volume><issue>8</issue><fpage>708</fpage><lpage>718</lpage><pub-id pub-id-type="doi">10.1056/NEJMoa1713973</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Broderick</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Adeoye</surname><given-names>O</given-names> </name><name name-style="western"><surname>Elm</surname><given-names>J</given-names> </name></person-group><article-title>Evolution of the modified Rankin scale and its use in future stroke trials</article-title><source>Stroke</source><year>2017</year><month>07</month><volume>48</volume><issue>7</issue><fpage>2007</fpage><lpage>2012</lpage><pub-id pub-id-type="doi">10.1161/STROKEAHA.117.017866</pub-id><pub-id pub-id-type="medline">28626052</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Dagli</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Ghenbot</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Ahmad</surname><given-names>HS</given-names> </name><etal/></person-group><article-title>Development and validation of a novel AI framework using NLP with LLM integration for relevant clinical data extraction through automated chart review</article-title><source>Sci Rep</source><year>2024</year><month>11</month><day>5</day><volume>14</volume><issue>1</issue><fpage>26783</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-77535-y</pub-id><pub-id pub-id-type="medline">39500759</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goyal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Menon</surname><given-names>BK</given-names> </name><name name-style="western"><surname>van Zwam</surname><given-names>WH</given-names> </name><etal/></person-group><article-title>Endovascular thrombectomy after large-vessel ischaemic stroke: a meta-analysis of individual patient data from five randomised trials</article-title><source>Lancet</source><year>2016</year><month>04</month><volume>387</volume><issue>10029</issue><fpage>1723</fpage><lpage>1731</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(16)00163-X</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nogueira</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Jadhav</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Haussen</surname><given-names>DC</given-names> </name><etal/></person-group><article-title>Thrombectomy 6 to 24 hours after stroke with a mismatch between deficit and 
infarct</article-title><source>N Engl J Med</source><year>2018</year><month>01</month><day>4</day><volume>378</volume><issue>1</issue><fpage>11</fpage><lpage>21</lpage><pub-id pub-id-type="doi">10.1056/NEJMoa1706442</pub-id><pub-id pub-id-type="medline">29129157</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Saeed</surname><given-names>F</given-names> </name><name name-style="western"><surname>Schell</surname><given-names>JO</given-names> </name></person-group><article-title>Shared decision making for older adults: time to move beyond dialysis as a default</article-title><source>Ann Intern Med</source><year>2023</year><month>01</month><volume>176</volume><issue>1</issue><fpage>129</fpage><lpage>130</lpage><pub-id pub-id-type="doi">10.7326/M22-3431</pub-id><pub-id pub-id-type="medline">36534979</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Rayner</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Dasgupta</surname><given-names>I</given-names> </name><name name-style="western"><surname>Lalayiannis</surname><given-names>AD</given-names> </name><name name-style="western"><surname>Hameed</surname><given-names>MA</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Rayner</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Dasgupta</surname><given-names>I</given-names> </name><name name-style="western"><surname>Lalayiannis</surname><given-names>AD</given-names> </name><name 
name-style="western"><surname>Hameed</surname><given-names>MA</given-names> </name></person-group><article-title>Planning treatment: when and how to prepare for a life with kidney disease</article-title><source>Understanding Kidney Diseases</source><year>2024</year><edition>3</edition><publisher-name>Springer Nature Switzerland</publisher-name><fpage>381</fpage><lpage>408</lpage><pub-id pub-id-type="other">9783031663499</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perpetua</surname><given-names>EM</given-names> </name><name name-style="western"><surname>Palmer</surname><given-names>R</given-names> </name><name name-style="western"><surname>Le</surname><given-names>VT</given-names> </name><etal/></person-group><article-title>JACC: Advances expert panel perspective: shared decision-making in multidisciplinary team-based cardiovascular care</article-title><source>JACC Adv</source><year>2024</year><month>07</month><volume>3</volume><issue>7</issue><fpage>100981</fpage><pub-id pub-id-type="doi">10.1016/j.jacadv.2024.100981</pub-id><pub-id pub-id-type="medline">39130036</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajkomar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hardt</surname><given-names>M</given-names> </name><name name-style="western"><surname>Howell</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Corrado</surname><given-names>G</given-names> </name><name name-style="western"><surname>Chin</surname><given-names>MH</given-names> </name></person-group><article-title>Ensuring fairness in machine learning to advance health equity</article-title><source>Ann Intern 
Med</source><year>2018</year><month>12</month><day>18</day><volume>169</volume><issue>12</issue><fpage>866</fpage><lpage>872</lpage><pub-id pub-id-type="doi">10.7326/M18-1990</pub-id><pub-id pub-id-type="medline">30508424</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Powers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vogeli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mullainathan</surname><given-names>S</given-names> </name></person-group><article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title><source>Science</source><year>2019</year><month>10</month><day>25</day><volume>366</volume><issue>6464</issue><fpage>447</fpage><lpage>453</lpage><pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id><pub-id pub-id-type="medline">31649194</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wurster</surname><given-names>F</given-names> </name><name name-style="western"><surname>Di Gion</surname><given-names>P</given-names> </name><name name-style="western"><surname>Goldberg</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Rogers&#x2019; diffusion of innovations theory and the adoption of a patient portal&#x2019;s digital anamnesis collection tool: study protocol for the MAiBest project</article-title><source>Implement Sci Commun</source><year>2024</year><month>07</month><day>15</day><volume>5</volume><issue>1</issue><fpage>74</fpage><pub-id pub-id-type="doi">10.1186/s43058-024-00614-8</pub-id><pub-id pub-id-type="medline">39010236</pub-id></nlm-citation></ref><ref 
id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sauerbrei</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kerasidou</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lucivero</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hallowell</surname><given-names>N</given-names> </name></person-group><article-title>The impact of artificial intelligence on the person-centred, doctor-patient relationship: some problems and solutions</article-title><source>BMC Med Inform Decis Mak</source><year>2023</year><month>04</month><day>20</day><volume>23</volume><issue>1</issue><fpage>73</fpage><pub-id pub-id-type="doi">10.1186/s12911-023-02162-y</pub-id><pub-id pub-id-type="medline">37081503</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tretter</surname><given-names>M</given-names> </name></person-group><article-title>Equipping AI-decision-support-systems with emotional capabilities? 
ethical perspectives</article-title><source>Front Artif Intell</source><year>2024</year><volume>7</volume><fpage>1398395</fpage><pub-id pub-id-type="doi">10.3389/frai.2024.1398395</pub-id><pub-id pub-id-type="medline">38881951</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sakthidevi</surname><given-names>I</given-names> </name><name name-style="western"><surname>Fathima</surname><given-names>G</given-names> </name></person-group><article-title>Improving access trust in healthcare through multimodal deep learning for affective computing</article-title><source>Hum-Cent Intell Syst</source><year>2024</year><volume>4</volume><issue>4</issue><fpage>511</fpage><lpage>526</lpage><pub-id pub-id-type="doi">10.1007/s44230-024-00080-4</pub-id></nlm-citation></ref></ref-list></back></article>