<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="review-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR AI</journal-id>
      <journal-title>JMIR AI</journal-title>
      <issn pub-type="epub">2817-1705</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v3i1e53207</article-id>
      <article-id pub-id-type="pmid">39476365</article-id>
      <article-id pub-id-type="doi">10.2196/53207</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>How Explainable Artificial Intelligence Can Increase or Decrease Clinicians’ Trust in AI Applications in Health Care: Systematic Review</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Yin</surname>
            <given-names>Zhijun</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mesko</surname>
            <given-names>Bertalan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Dhunnoo</surname>
            <given-names>Pranavsingh</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Wang</surname>
            <given-names>Ning</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Rosenbacke</surname>
            <given-names>Rikard</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Centre for Corporate Governance</institution>
            <institution>Department of Accounting</institution>
            <institution>Copenhagen Business School</institution>
            <addr-line>Solbjerg Plads 3</addr-line>
            <addr-line>Frederiksberg, DK-2000</addr-line>
            <country>Denmark</country>
            <phone>45 709990907</phone>
            <email>rikard@rosenbacke.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0007-4504-9106</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Melhus</surname>
            <given-names>Åsa</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0006-1191-4061</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>McKee</surname>
            <given-names>Martin</given-names>
          </name>
          <degrees>MD, DSc</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0121-9683</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Stuckler</surname>
            <given-names>David</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1288-8401</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Centre for Corporate Governance</institution>
        <institution>Department of Accounting</institution>
        <institution>Copenhagen Business School</institution>
        <addr-line>Frederiksberg</addr-line>
        <country>Denmark</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Medical Sciences</institution>
        <institution>Clinical Microbiology</institution>
        <institution>Uppsala University</institution>
        <addr-line>Uppsala</addr-line>
        <country>Sweden</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>European Observatory on Health Systems and Policies</institution>
        <institution>London School of Hygiene &#38; Tropical Medicine</institution>
        <addr-line>London</addr-line>
        <country>United Kingdom</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Social and Political Sciences</institution>
        <institution>Bocconi University</institution>
        <addr-line>Milano</addr-line>
        <country>Italy</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Rikard Rosenbacke <email>rikard@rosenbacke.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>30</day>
        <month>10</month>
        <year>2024</year>
      </pub-date>
      <volume>3</volume>
      <elocation-id>e53207</elocation-id>
      <history>
        <date date-type="received">
          <day>29</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>5</day>
          <month>3</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>22</day>
          <month>3</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>17</day>
          <month>9</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Rikard Rosenbacke, Åsa Melhus, Martin McKee, David Stuckler. Originally published in JMIR AI (https://ai.jmir.org), 30.10.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on https://www.ai.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://ai.jmir.org/2024/1/e53207" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) has significant potential in clinical practice. However, its “black box” nature can lead clinicians to question its value. The challenge is to create sufficient trust for clinicians to feel comfortable using AI, but not so much that they defer to it even when it produces results that conflict with their clinical judgment in ways that lead to incorrect decisions. Explainable AI (XAI) aims to address this by providing explanations of how AI algorithms reach their conclusions. However, it remains unclear whether such explanations foster an appropriate degree of trust to ensure the optimal use of AI in clinical practice.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to systematically review and synthesize empirical evidence on the impact of XAI on clinicians’ trust in AI-driven clinical decision-making.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A systematic review was conducted in accordance with PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines, searching PubMed and Web of Science databases. Studies were included if they empirically measured the impact of XAI on clinicians’ trust using cognition- or affect-based measures. Out of 778 articles screened, 10 met the inclusion criteria. We assessed the risk of bias using standard tools appropriate to the methodology of each paper.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The risk of bias in all papers was moderate or moderate to high. All included studies operationalized trust primarily through cognitive-based definitions, with 2 also incorporating affect-based measures. Out of these, 5 studies reported that XAI increased clinicians’ trust compared with standard AI, particularly when the explanations were clear, concise, and relevant to clinical practice. In addition, 3 studies found no significant effect of XAI on trust, and the presence of explanations does not automatically improve trust. Notably, 2 studies highlighted that XAI could either enhance or diminish trust, depending on the complexity and coherence of the provided explanations. The majority of studies suggest that XAI has the potential to enhance clinicians’ trust in recommendations generated by AI. However, complex or contradictory explanations can undermine this trust. More critically, trust in AI is not inherently beneficial, as AI recommendations are not infallible. These findings underscore the nuanced role of explanation quality and suggest that trust can be modulated through the careful design of XAI systems.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Excessive trust in incorrect advice generated by AI can adversely impact clinical accuracy, just as can happen when correct advice is distrusted. Future research should focus on refining both cognitive and affect-based measures of trust and on developing strategies to achieve an appropriate balance in terms of trust, preventing both blind trust and undue skepticism. Optimizing trust in AI systems is essential for their effective integration into clinical practice.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>explainable artificial intelligence</kwd>
        <kwd>XAI</kwd>
        <kwd>trustworthy AI</kwd>
        <kwd>clinician trust</kwd>
        <kwd>affect-based measures</kwd>
        <kwd>cognitive measures</kwd>
        <kwd>clinical use</kwd>
        <kwd>clinical decision-making</kwd>
        <kwd>clinical informatics</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Artificial intelligence (AI) is increasingly being promoted as a means to transform health care. AI can enhance clinical decision-making, reduce medical errors, and improve patient outcomes [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Yet, to realize its full potential in health care, clinicians must trust it and be comfortable with its outputs [<xref ref-type="bibr" rid="ref3">3</xref>]. Establishing and maintaining trust is challenging, especially in light of growing warnings from some leading AI experts about its potential risks to society [<xref ref-type="bibr" rid="ref4">4</xref>].</p>
      <p>Currently, there is a dearth of studies on how to increase trust in AI among clinicians. In a recent systematic review on trust in AI, it was observed that transparency is critical for fostering trust among decision makers [<xref ref-type="bibr" rid="ref5">5</xref>]. To increase transparency and, thus, trust in AI, it has been proposed that measures should be added to its predictions to make the models more transparent and explainable to human users [<xref ref-type="bibr" rid="ref6">6</xref>]. So-called explainable AI (XAI) can be considered to fall within several categories: (1) “local” (specific) explanations of an individual prediction [<xref ref-type="bibr" rid="ref7">7</xref>], (2) “global” explanations presenting the model’s general logic [<xref ref-type="bibr" rid="ref8">8</xref>], (3) “counterfactual” explanations indicating a threshold at which the algorithm could change its recommendations, (4) confidence explanations, indicating the probability that the prediction is correct [<xref ref-type="bibr" rid="ref9">9</xref>], and (5) example-based, where the AI justifies its decision by providing examples that have similar characteristics from the same dataset [<xref ref-type="bibr" rid="ref10">10</xref>].</p>
      <p>Trust is a complex concept that has been explored in a range of disciplines, including philosophy, economics, sociology, and psychology [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref15">15</xref>], with a recent review by one of us [<xref ref-type="bibr" rid="ref16">16</xref>] noting how little interaction exists between these disciplinary perspectives. Here, we rely on psychological models, which we consider to be particularly helpful in this context. In a dual theory developed by Kahneman [<xref ref-type="bibr" rid="ref17">17</xref>], 2 main ways of thinking exist. The first is quick and based on gut feelings or intuition, whereas the second is slower, taking a more thoughtful and reasoning approach. Trust forms a mental picture of another person or a system, and when trying to untangle all its intricacies, it is practically impossible to use only rational thought. Consequently, the decision to trust someone or something like an AI tool or a physician is often derived from an instinctive judgment or intuition. In this model, trust is viewed as a decision-making shortcut, enabling the decision maker to select information while ignoring other information to simplify a complex decision [<xref ref-type="bibr" rid="ref18">18</xref>]. Applied to empirical research, Madsen et al [<xref ref-type="bibr" rid="ref19">19</xref>] describe these 2 broad approaches as cognition-based trust and affect-based trust, terms that we will use in this study.</p>
      <p>A series of recent reviews have examined XAI from a trust perspective. However, partly reflecting the speed of development of the field, these do not include the most recent empirical evidence from clinical settings, although they did consistently speculate that XAI could increase users’ trust and thus the intention to use AI tools [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>], as well as enhance confidence in decisions and thus the trust of clinicians [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>]. None of these studies differentiated between varying trust measures or health care domains.</p>
      <p>To fill this gap, we performed a systematic review of empirical evidence on the impact of XAI on clinicians’ trust. In addition, we categorized and differentiated studies according to which type of trust measure they used, cognition- or affect-based trust, as well as types of medical data used (imaging vs tabular formats).</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Search Strategy</title>
        <p>A total of 2 authors (RR and DS) performed a systematic review in accordance with the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines [<xref ref-type="bibr" rid="ref24">24</xref>]. On March 23, 2023, we searched the title and abstract fields of PubMed and recognized that the topic would be covered by a wide range of disciplines; hence, we also used the Web of Science database. We searched for published articles on XAI and trust within health care. Our initial reading revealed the use of many words that conveyed some aspect of what we might consider “trust.” In light of this work and the many different conceptions of trust [<xref ref-type="bibr" rid="ref25">25</xref>], we intentionally used a broad search strategy without specifying trust and its alternative variants (such as confidence, intention to use, etc) to avoid the risk of “type-2 errors” whereby relevant articles that should have been included were omitted.</p>
        <p>We operationalized XAI and health care using a range of keyword permutations adapted to each database (full strategy in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p>
      </sec>
      <sec>
        <title>Inclusion and Exclusion Criteria</title>
        <p>We applied a range of inclusion and exclusion criteria. Articles were included if they (1) measured trust (and related terms) as an outcome, (2) used XAI as an intervention or exposure, (3) used machine learning (ML) in the underlying AI model, (4) were empirical studies, and (5) were carried out by practicing clinicians. Articles were excluded if they were (1) reviews, commentaries, reports of methodology, or conceptual papers or (2) not applied in a health care setting from a clinician’s perspective. Furthermore, 2 reviewers, RR and DS, performed the screening, and any disputes were resolved against these prespecified criteria and with a third reviewer (ÅM).</p>
      </sec>
      <sec>
        <title>Extraction and Analysis</title>
        <p>We extracted from each included study the following data: author, year of publication, country, health care domain, discipline behind the study, image versus tabular data input, study design and setting, clinical or experimental setting, sample size, intervention or exposure of interest, outcome measures, study results, and conclusions. Data were entered into a Microsoft Excel spreadsheet for analysis. RR extracted the data using the preestablished data entry format, with verification by DS to ensure consistency. We disaggregated the analysis by trust dimensions (cognitive versus affect-based) and by type of data evaluated (image versus tabular data). We also assessed each paper for risk of bias, using either the Cochrane Risk of Bias 2 (RoB 2) or Risk of Bias in Non-randomized Studies of Interventions (ROBINS-I) tool.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview of Search Results</title>
        <p>Our initial search identified 373 publications in PubMed and 713 publications in Web of Science, 308 of which were duplicates, leaving 778 for the screening and eligibility stages. We excluded 300 records since they were reviews, commentaries, methodological reports, conceptual papers, or not related to the health care sector. A total of 83 papers did not study XAI, and 347 were not empirical studies with trust as an outcome and explanations as an intervention. This left 48, all of which were successfully retrieved. We excluded another 38 studies when reviewing the full text as they did not measure trust or XAI empirically, or the evaluation was not carried out by practicing clinicians. This yielded 10 articles for the final review (<xref rid="figure1" ref-type="fig">Figure 1</xref>) [<xref ref-type="bibr" rid="ref26">26</xref>-<xref ref-type="bibr" rid="ref35">35</xref>].</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow chart. XAI: explainable artificial intelligence.</p>
          </caption>
          <graphic xlink:href="ai_v3i1e53207_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The publications were imported into Zotero (Corporation for Digital Scholarship) reference management software. The PRISMA flow diagram of our review is shown in <xref rid="figure1" ref-type="fig">Figure 1</xref> (PRISMA checklist provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>).</p>
      </sec>
      <sec>
        <title>Characteristics of Included Studies</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> provides a summary of the final studies. There was a clear increase in papers on trust and XAI in health care during 2022; 70% (7/10) were published between 2022 and the end of the inclusion period on March 23, 2023.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Summary of the extracted studies.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="130"/>
            <col width="90"/>
            <col width="120"/>
            <col width="110"/>
            <col width="100"/>
            <col width="90"/>
            <col width="140"/>
            <col width="120"/>
            <col width="100"/>
            <thead>
              <tr valign="top">
                <td>Title</td>
                <td>Authors (Year) Country</td>
                <td>Study discipline</td>
                <td>Respondents (Sample size, n)</td>
                <td>Health care domain</td>
                <td>Tabular or Image</td>
                <td>Description of intervention</td>
                <td>Trust measurement</td>
                <td>Trust improvement</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>As if sand were stone. New concepts and metrics to probe the ground on which to build trustable AI</td>
                <td>Cabitza et al [<xref ref-type="bibr" rid="ref26">26</xref>] (2020) Italy</td>
                <td>Computer science, Orthopedic and biomedicine</td>
                <td>Physician (13)</td>
                <td>Radiology</td>
                <td>Image</td>
                <td>Measure radiologists’ confidence score as a marker for trust</td>
                <td>Quantitative confidence score, 6-grade scale.</td>
                <td>No effect</td>
              </tr>
              <tr valign="top">
                <td>Doctor’s dilemma: Evaluating an explainable subtractive spatial lightweight convolutional neural network for brain tumor diagnosis</td>
                <td>Kumar et al [<xref ref-type="bibr" rid="ref27">27</xref>] (2021) India</td>
                <td>Computer science</td>
                <td>Physicians (10)</td>
                <td>Brain tumor</td>
                <td>Image</td>
                <td>Building an explainable deep learning model to reduce complexity in MR classifications.</td>
                <td>Quantitative doctor survey using 5-grade Likert Scale.</td>
                <td>Increased trust</td>
              </tr>
              <tr valign="top">
                <td>Does AI explainability affect physicians' intention to use AI?</td>
                <td>Liu et al [<xref ref-type="bibr" rid="ref35">35</xref>] (2022) Taiwan</td>
                <td>Medical research, cardiology, pediatrics</td>
                <td>Physicians (295)</td>
                <td>—<sup>a</sup></td>
                <td>Image</td>
                <td>Comparing intention to use XAI vs AI</td>
                <td>Quantitative survey using a 5-grade scale.</td>
                <td>Increased trust</td>
              </tr>
              <tr valign="top">
                <td>Explainable recommendation: when design meets trust calibration.</td>
                <td>Naiseh et al [<xref ref-type="bibr" rid="ref28">28</xref>] (2021) United Kingdom</td>
                <td>Computer science</td>
                <td>Physicians and pharmacists (24)</td>
                <td>Oncology</td>
                <td>Tabular</td>
                <td>Involved physicians and pharmacists in think-aloud study and codesign to identify potential trust calibration errors</td>
                <td>Qualitative interviews analyzed using content analysis.</td>
                <td>Varied, depending on factors such as the form of explanation</td>
              </tr>
              <tr valign="top">
                <td>How the different explanation classes impact trust calibration: The case of clinical decision support systems</td>
                <td>Naiseh et al [<xref ref-type="bibr" rid="ref29">29</xref>] (2023) United Kingdom</td>
                <td>Computer science</td>
                <td>Physicians and pharmacists (41)</td>
                <td>Chemotherapy</td>
                <td>Tabular</td>
                <td>Trust calibration for 4 XAI classes (counterfactuals, example-based, global and local explanations) vs no explanations</td>
                <td>Quantitative self-reporting cognitive-based trust using a 5-grade scale and qualitative interviews were coded.</td>
                <td>Varied, depending on factors such as the form of explanation</td>
              </tr>
              <tr valign="top">
                <td>Interpretable clinical time-series modeling with intelligent feature selection for early prediction of antimicrobial multidrug resistance</td>
                <td>Martínez-Agüero et al [<xref ref-type="bibr" rid="ref34">34</xref>] (2022) Spain</td>
                <td>Computer science and Intensive care department for validation</td>
                <td>Clinicians (no specification)</td>
                <td>Antibiotic resistance</td>
                <td>Tabular</td>
                <td>SHAP explanations for predictors to provide clinicians with explanations in natural language</td>
                <td>Qualitative, where clinicians self-report.</td>
                <td>Increased trust</td>
              </tr>
              <tr valign="top">
                <td>Nontask expert physicians benefit from correct explainable AI advice when reviewing X-rays.</td>
                <td>Gaube et al [<xref ref-type="bibr" rid="ref33">33</xref>] (2023) United States and Canada</td>
                <td>Medicine, psychology, and computer science</td>
                <td>Internal or emergency medicine physicians and radiologists (223)</td>
                <td>Radiology</td>
                <td>Image</td>
                <td>Visible annotation on the X-ray done by human or XAI</td>
                <td>Quantitative self-reporting using 7-grade scale.</td>
                <td>No effect</td>
              </tr>
              <tr valign="top">
                <td>The explainability paradox: Challenges for XAI in digital pathology</td>
                <td>Evans et al [<xref ref-type="bibr" rid="ref30">30</xref>] (2022)</td>
                <td>Computer science and biomedicine</td>
                <td>Board-certified pathologists and professionals in pathology or neuropathology (6+25)</td>
                <td>Pathology</td>
                <td>Image</td>
                <td>Saliency maps to explain predictions through visualizations</td>
                <td>Quantitative self-reporting using 7-grade scale. Qualitative semistructured interviews.</td>
                <td>Increased trust</td>
              </tr>
              <tr valign="top">
                <td>Trustworthy AI explanations as an interface in medical diagnostic systems</td>
                <td>Kaur et al [<xref ref-type="bibr" rid="ref31">31</xref>] (2022) United States</td>
                <td>Computer science</td>
                <td>Physicians (2)</td>
                <td>Breast cancer prediction</td>
                <td>Image</td>
                <td>Involved physicians evaluate 3 different systems and rate them “Trustworthy Explainability Acceptance.”</td>
                <td>Quantitative, trust is calculated using both impression and confidence.</td>
                <td>Developed framework to measure trust. No effect identified</td>
              </tr>
              <tr valign="top">
                <td>UK reporting radiographers’ perceptions of AI in radiographic image interpretation current perspectives and future developments</td>
                <td>Rainey et al [<xref ref-type="bibr" rid="ref32">32</xref>] (2022) United Kingdom</td>
                <td>Health science, radiography, and computer science</td>
                <td>Radiographers (86)</td>
                <td>Radiography</td>
                <td>Image</td>
                <td>—</td>
                <td>Quantitative self-reporting using 10-grade scale.</td>
                <td>Increased trust</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>The studies displayed marked heterogeneity in methods, disciplinary collaboration, and perspectives of trust. All but 1 involved computer scientists; 4 were conducted solely by computer scientists without involvement by experts with a medical background, and the remaining 5 involved collaborations between medical experts and computer scientists. The inputs to the AI tools were medical imaging or tabular data formats. The risk of bias in each study is reported in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. In all studies, the risk of bias was moderate or moderate to high.</p>
        <p>We begin by looking at studies of medical imaging and tabular data separately, providing an overview of the characteristics and results before moving on to talk about the different ways in which studies conceptualize or measure trust (as we found that this seemed to be a key consideration in interpreting studies’ results).</p>
      </sec>
      <sec>
        <title>Medical Imaging</title>
        <p>Out of the 7 medical imaging studies reviewed, 4 (57%) identified a significant and positive association between the use of XAI and perceived trust, 1 study (14%) reached no clear conclusions, while 2 (29%) found limited or no significant impact.</p>
        <p>A study by Liu et al [<xref ref-type="bibr" rid="ref35">35</xref>] asked 295 physicians across 3 hospitals in Taiwan if explanations increased their trust in the algorithm and their propensity to use XAI compared with AI. They found that physicians were more inclined to trust and implement AI in clinical practice if they perceived the results as being more explainable or comprehensible. Similarly, a web-based experiment by Evans et al [<xref ref-type="bibr" rid="ref30">30</xref>] surveyed trust levels among board-certified physicians in pathology or neuropathology in using XAI to interpret pathology images. The XAI instrument highlighted the areas in medical images that determined whether the prediction was made with high or low confidence. In addition, 70% agreed that their level of trust increased as a result of the explanations provided, while approximately 10% disagreed, and the rest were undecided.</p>
        <p>A study by Cabitza et al [<xref ref-type="bibr" rid="ref26">26</xref>] differentiated Gold Standard labels (categorizing cases as positive or negative) from Diamond Standard ones, where the reason for categorization was annotated and indicated confidence in the allocation. A total of 13 radiologists were then asked to evaluate images of knees. Confidence in the allocation was considered a proxy for trust, and there was no association between confidence and accuracy. Gaube et al [<xref ref-type="bibr" rid="ref33">33</xref>] conducted a qualitative investigation of 117 clinical residents or practicing emergency medicine physicians and 106 radiologists. They reported that explanations had little or no significant impact on the trust and the perceived usefulness of AI. The participants were shown x-rays with and without annotations as explanations. Internal and emergency medicine physicians (IM/EM), who lacked specialist training in radiology, achieved better diagnostic accuracy when provided with explanations (<italic>P</italic><sub>IM/EM</sub>=.042), but there was no such benefit for radiologists (<italic>P</italic><sub>Radiology</sub>=.12). In neither group did annotations have any meaningful effect on confidence in their own final diagnosis (<italic>P</italic><sub>IM/EM</sub>=.280, <italic>P</italic><sub>Radiology</sub>=.202). The authors did not find convincing evidence for either algorithmic appreciation (a tendency to trust algorithms) or algorithmic aversion (a tendency not to trust algorithms).</p>
      </sec>
      <sec>
        <title>Tabular Data</title>
        <p>The 3 studies using XAI techniques with tabular data found positive relationships between explanations of AI and perceived trust. However, in 2 of the studies, results varied, and the authors argued that an inappropriate use of explanations can induce under- or overtrust.</p>
        <p>A qualitative study by Martinez-Aguero et al [<xref ref-type="bibr" rid="ref34">34</xref>] investigated whether XAI, when compared with AI, increased trust among clinicians searching for multidrug-resistant bacteria in intensive care units. The authors concluded that both visual and textual explanations helped clinicians understand the model output and increased trust in the XAI. However, neither the number of respondents nor the instrument used to measure trust was clearly reported.</p>
        <p>Naiseh et al [<xref ref-type="bibr" rid="ref28">28</xref>] performed a qualitative study on the influence of XAI on the prescribing decisions of physicians and pharmacists in the field of oncology. For trust, they used Chiou and Lee’s [<xref ref-type="bibr" rid="ref36">36</xref>] terminology of appropriate reliance. They initially performed semistructured interviews with 16 participants to understand how these providers engaged with 5 distinct types of explanations: local, global, counterfactual, example-based, and confidence-based. The authors coded the providers as exhibiting “high” or “low” trust only if this behavior was consistent across all 5 explanation types in the study. Although the physicians and pharmacists were generally favorable toward explanations, they exhibited a lack of trust and skepticism about XAI’s accuracy. They further identified 2 primary causes of errors in trust calibration: (1) skipping explanations or (2) misapplication of explanations. Skipping occurred when providers made decisions with AI without fully engaging with the accompanying explanations. This was due to (1) disinterest in understanding the explanation, (2) decision delays due to the explanation, and (3) perceived redundancy, complexity, or context irrelevance. Misapplication occurred when the providers misunderstood the explanations or used them merely to confirm their initial judgment. They then conducted codesign sessions with 8 participants. From these, they proposed enhancing XAI interface designs to help avoid skipping or misinterpreting explanations. 
The designs included active or cognitive engagement of decision-makers in the decision-making process, challenge of habitual actions in the XAI system by introducing alternative perspectives or recommendations that may not align with the clinical decision-maker’s previous experiences or assumptions, friction that requires the decision-maker to confirm their decision before it is implemented, and support consisting of training and learning opportunities for clinical decision-makers to enhance the understanding and usage of the system.</p>
        <p>This same team studied 41 medical practitioners who were frequent users of clinical decision support systems [<xref ref-type="bibr" rid="ref29">29</xref>]. They sought to develop interventions that would enable physicians to have an optimal level of trust (or reliance), as defined by the authors, in predictions by AI models and to avoid errors that might arise from excessive under- or overtrust. The clinicians used 4 different XAI classes (global, local, counterfactual, and example-based; their other study had included confidence-based), and the research group explored the clinicians’ experiences using semistructured interviews. A subsequent mixed methods study on chemotherapy prescriptions found differences in the trust generated by different explanations. Participants found example-based and counterfactual explanations more understandable than the others, but there were no differences in perceptions of technical competence, a view supported in semistructured interviews, largely because they were easier to comprehend. In addition, the researchers identified a potential for overreliance on AI, as providers were more inclined to accept AI recommendations when they were accompanied by explanations, although explanations did not help them identify incorrect recommendations. They made a series of suggestions as to how the interface design might be enhanced, although they also noted that it could be very difficult to incorporate the many different types of questions that users might ask. Some might seek very detailed explanations, while others could be deterred by the resulting cognitive overload. As the authors note, “long and redundant explanations make participants skip them.” Perhaps more fundamentally, several of those interviewed said that they would be reluctant to use this tool because of the high cognitive load involved in seeking to understand some decisions.</p>
      </sec>
      <sec>
        <title>Conceptualizing and Measuring Trust</title>
        <p>The studies that were reviewed take 2 broad approaches to defining trust: cognition-based trust and affect-based trust [<xref ref-type="bibr" rid="ref19">19</xref>]. The initial approach, cognition-based trust, revolves around the perceived clarity and technical ability of XAI, fundamentally grounded in rational analysis. On the other hand, affect-based trust encompasses emotional bonds and beliefs originating from previous experiences and sentiments toward AI, as opposed to logical deliberation. All 10 studies applied cognition-based trust. However, 2 studies also investigated trust in terms of affect or emotions.</p>
        <p>A total of 8 studies used quantitative surveys to measure trust, integrating them with qualitative interviews in 2 instances. The remaining 2 exclusively used qualitative interviews. We found marked heterogeneity in the questions used.</p>
        <p>Naiseh et al [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>] noted that explanations affected both cognitive and affect-based trust and could result in either overtrust or undertrust. In the 2021 study [<xref ref-type="bibr" rid="ref28">28</xref>], they used qualitative think-aloud methods and suggested that 1 reason for users skipping or misapplying explanations could be that affect-based trust overrides cognitive and deliberate trust. A couple of years later, they published a new study [<xref ref-type="bibr" rid="ref29">29</xref>] in which they investigated whether different XAI classes or methods increased or decreased cognitive-based trust. They found that some types of explanation could introduce a cognitive overreliance on the AI, but they questioned whether biases and affect-based trust also played roles.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>We examined empirical evidence on the impact of explainable AI on physicians’ trust levels and intention to use AI. Out of the 10 studies included, 50% (5/10) reported that XAI increased trust, while 20% (2/10) observed both increased and decreased trust levels. Both overtrust and undertrust appeared to be modifiable by brief cognitive interventions to optimize trust [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. In 2 studies (20%), no effects of XAI were shown, and 1 study (10%) did not reach any conclusions. Only small differences of no consequence were identified between studies using tabular data formats and image data.</p>
        <p>Before interpreting these findings further, we must note several important limitations of our study’s search strategy. First, there is considerable heterogeneity in the use of the term “trust” and how it is operationalized in health care research. To avoid potentially missing important studies in our search, we adopted a conservative search strategy in which we did not specify trust as a keyword but rather manually searched for all papers, including a broad set of trust-related outcomes. Related to this, the rapid evolution of AI has been associated with conceptual confusion about its meaning. Several recent studies have sought to operationalize AI in markedly varying ways, drawing on technology, for example, which is not actually based on AI algorithms [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. For clarity, we specifically constrained our search to AI algorithms, which used machine-learning techniques. Second, we used 2 main databases of peer-reviewed studies, PubMed and Web of Science. The former has broad coverage in medicine and social sciences but could potentially miss emerging studies in computer science, but Clarivate, which publishes Web of Science, notes that it has “Strongest coverage of natural sciences &#38; engineering, computer science, materials sciences, patents, data sets” [<xref ref-type="bibr" rid="ref39">39</xref>]. We do, however, accept that, in a rapidly developing field, we may have missed material in preprints or non–peer-reviewed conference papers. In addition, for coherence across platforms, we did not use MeSH (Medical Subject Headings) terms in PubMed, as they are not used in Web of Science, and we wanted to achieve consistency. The keyword “clinical” also may potentially have excluded studies in some clinical specialties. However, the vast number of potential specialist terms that could be used makes it virtually impossible to implement a wider strategy in practice. 
Finally, there has been extensive study of psychological biases in how decision makers, including clinicians, respond to new data and update previous beliefs in incorporating evidence to make decisions [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. Studies by psychologists are needed to evaluate the role these biases (including but not limited to default bias and confirmation bias) play in medical decision-making when using XAI.</p>
        <p>A series of limitations were also identified in the included studies. Generally, the study designs widely varied, from qualitative investigations to experimental quantitative studies, making it difficult to draw direct comparisons. However, we have sought to the extent possible to identify emerging themes and patterns across tabular and visual XAI applications, as well as a series of methodological limitations to address in future studies. In addition, the relatively low number of studies (n=10) limits generalizability to other populations and settings. Another limitation present in several studies was the weak reporting of trust measurement instruments, as well as the number of respondents, particularly in qualitative studies. Few studies have reported the validity of the underlying XAI algorithm, which could also alter health care providers’ engagement and trust in XAI technologies. Future research should seek to improve the reporting of this necessary information.</p>
        <p>Although our review focused on how XAI impacted clinicians’ trust levels and intention to use this technology, a few additional observations are of interest. Gaube et al [<xref ref-type="bibr" rid="ref33">33</xref>] found no difference in trust between experts and nonexperts but reported that the performance of nonexperts who drew upon XAI was superior in clinical practice. Future studies are needed not just to evaluate the impact of XAI on its adoption and trustworthiness but also its potential clinical efficacy. In this context, it is worth noting that while all included studies offered explanations that could be added to AI predictions, the validity of those explanations has yet to be critically evaluated [<xref ref-type="bibr" rid="ref41">41</xref>]. It is unclear how XAI can overcome limitations inherent in clinical domains where mechanistic understanding is lacking. That is, XAI will likely struggle to explain what is currently unexplainable at the frontier of clinical medicine. This could potentially lead to explanations that, albeit perceived as trustworthy, are not founded on established clinical knowledge and instead are “misconceptions” by AI. The XAI explanations are still simplifications of the original AI model, and when the abstraction level is heightened, the granularity is usually reduced.</p>
        <p>This review also points to the need to understand how trust in XAI can be optimized rather than simply being evaluated in terms of increased or decreased with the help of different types of explanations. Clinical decision-making inevitably involves an element of judgment. While AI may be able to process more information than a human, humans may also be able to incorporate insights that are not included in algorithms [<xref ref-type="bibr" rid="ref41">41</xref>]. Thus, the challenge is to achieve an appropriate level of trust in AI, neither too limited, in which case the clinician will be reluctant to use it, nor too extensive, as this may cause experienced clinicians to subordinate their own judgment to the AI outputs.</p>
        <p>Yet, while it is apparent that neither blind trust nor blind distrust may be appropriate, it is unclear what an appropriate or optimal level of trust should be. None of the studies attempted to explore what this should be, which remains an important area for future research. However, the studies reviewed indicated that the levels of trust that health care providers place in AI depend on multiple clinically relevant factors, including but not limited to the accuracy of the algorithm, the validation, and the potential impact on patients.</p>
        <p>Our study also points to several further directions for future research. First, while the interdisciplinary literature featured prominent computer scientists and clinicians, there was a notable absence of psychologists. There is considerable scope to improve the appropriate uptake and adoption of AI by drawing upon evidence from the wider psychological literature on medical decision-making. One such framework is a dual process model, which integrates both cognitive and affect-based means of decision-making jointly. Kahneman [<xref ref-type="bibr" rid="ref17">17</xref>] argues that the human mind uses 2 processes for decision-making: the fast thinking and intuitive process, including heuristics, biases, and cognitive shortcuts that recall affect-based trust, and the slow thinking and reasoning process that recalls cognitive-based trust. Furthermore, Thaler and Sunstein [<xref ref-type="bibr" rid="ref42">42</xref>] have found that both these processes can be influenced (or nudged), especially the rapid thinking intuitive judgments. Brief cognitive interventions such as nudging have sometimes proven to be useful in health. The extant literature appears to incorporate mainly reasoning-based cognitive markers but misses out on intuitive and emotion-based processes for evaluating trust levels in emerging technologies.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>A majority of the included studies showed that XAI increases clinicians’ trust and intention to use AI; 2 of these studies showed that explanations could both increase and decrease trust, and in 3 studies, explanations fell through or did not add any value. However, in health care, when AI tools incorporate associated explanations, they must avoid 2 common psychological pitfalls. First, they must be made sufficiently clear to avoid risks of blind distrust when physicians do not understand them. Second, they must avoid oversimplification and failing to disclose limitations in models that could lead to blind trust among physicians with an artificial level of clinical certainty. Explanations can both increase and decrease trust, and understanding the optimal level of trust in relation to the algorithm’s accuracy will be critical. When AI algorithms surpass physicians in terms of accuracy, the integration could be facilitated through means such as providing explanations. Yet, the provision of explanations is not a failsafe method to detect errors in the algorithms, as it might inadvertently foster excessive trust. How to find an optimal level of trust and how to best communicate AI to physicians will remain a defining health care challenge of our time.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Search strategy.</p>
        <media xlink:href="ai_v3i1e53207_app1.docx" xlink:title="DOCX File , 13 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) checklist.</p>
        <media xlink:href="ai_v3i1e53207_app2.docx" xlink:title="DOCX File , 31 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Assessment of risk of bias.</p>
        <media xlink:href="ai_v3i1e53207_app3.docx" xlink:title="DOCX File , 16 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">IM/EM</term>
          <def>
            <p>internal and emergency medicine physicians</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">MeSH</term>
          <def>
            <p>Medical Subject Headings</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">ML</term>
          <def>
            <p>machine learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">PRISMA</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">RoB 2</term>
          <def>
            <p>Risk of Bias 2</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ROBINS-I</term>
          <def>
            <p>Risk of Bias in Non-randomized Studies of Interventions</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">XAI</term>
          <def>
            <p>explainable artificial intelligence</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The authors have not declared a specific grant for this research from any funding agency in the public, commercial, or not-for-profit sectors. MM’s work on AI is part of the work programme of the European Observatory on Health Systems and Policies.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>RR contributed to the idea, collaborated with DS in data collection, performed the review, and drafted the manuscript. All authors contributed to the interpretation, writing, and editing of the manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schwalbe</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wahl</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and the future of global health</article-title>
          <source>Lancet</source>
          <year>2020</year>
          <volume>395</volume>
          <issue>10236</issue>
          <fpage>1579</fpage>
          <lpage>1586</lpage>
          <pub-id pub-id-type="doi">10.1016/S0140-6736(20)30226-9</pub-id>
          <pub-id pub-id-type="medline">32416782</pub-id>
          <pub-id pub-id-type="pii">S0140-6736(20)30226-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC7255280</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rajpurkar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>AI in health and medicine</article-title>
          <source>Nat Med</source>
          <year>2022</year>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>31</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-021-01614-0</pub-id>
          <pub-id pub-id-type="medline">35058619</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-021-01614-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cutillo</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Foschini</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kundu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mackintosh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mandl</surname>
              <given-names>KD</given-names>
            </name>
            <collab>MI in Healthcare Workshop Working Group</collab>
          </person-group>
          <article-title>Machine intelligence in healthcare-perspectives on trustworthiness, explainability, usability, and transparency</article-title>
          <source>NPJ Digit Med</source>
          <year>2020</year>
          <volume>3</volume>
          <fpage>47</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-020-0254-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-020-0254-2</pub-id>
          <pub-id pub-id-type="medline">32258429</pub-id>
          <pub-id pub-id-type="pii">254</pub-id>
          <pub-id pub-id-type="pmcid">PMC7099019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ienca</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Don’t pause giant AI for the wrong reasons</article-title>
          <source>Nat Mach Intell</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>5</issue>
          <fpage>470</fpage>
          <lpage>471</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s42256-023-00649-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s42256-023-00649-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Glikson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Woolley</surname>
              <given-names>AW</given-names>
            </name>
          </person-group>
          <article-title>Human trust in artificial intelligence: review of empirical research</article-title>
          <source>Acad Manag Ann</source>
          <year>2020</year>
          <volume>14</volume>
          <issue>2</issue>
          <fpage>627</fpage>
          <lpage>660</lpage>
          <pub-id pub-id-type="doi">10.5465/annals.2018.0057</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barredo Arrieta</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Díaz-Rodríguez</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Del Ser</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bennetot</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tabik</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Barbado</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Garcia</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gil-Lopez</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Molina</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Benjamins</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chatila</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Herrera</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Explainable artificial intelligence (XAI): concepts, taxonomies, opportunities and challenges toward responsible AI</article-title>
          <source>Inf Fusion</source>
          <year>2020</year>
          <volume>58</volume>
          <fpage>82</fpage>
          <lpage>115</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.inffus.2019.12.012"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2019.12.012</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ribeiro</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Guestrin</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>"Why Should I Trust You?": explaining the predictions of any classifier</article-title>
          <year>2016</year>
          <conf-name>Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name>
          <conf-date>Aug 13, 2016</conf-date>
          <conf-loc>USA</conf-loc>
          <fpage>1135</fpage>
          <lpage>1144</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi-org.esc-web.lib.cbs.dk/10.1145/2939672.2939778"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/2939672.2939778</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>King</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Towards global explanations of convolutional neural networks with concept attribution</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 13, 2020</conf-date>
          <conf-loc>Seattle, WA, USA</conf-loc>
          <fpage>8652</fpage>
          <lpage>8661</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr42600.2020.00868</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Bellamy</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Effect of confidence and explanation on accuracy and trust calibration in AI-assisted decision making</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency</conf-name>
          <conf-date>2020 Jan 27</conf-date>
          <conf-loc>New York</conf-loc>
          <fpage>295</fpage>
          <lpage>305</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi-org.esc-web.lib.cbs.dk/10.1145/3351095.3372852"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3351095.3372852</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>QV</given-names>
            </name>
            <name name-style="western">
              <surname>Gruen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Questioning the AI: informing design practices for explainable AI user experiences</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>Apr 23, 2020</conf-date>
          <conf-loc>USA</conf-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <fpage>1</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1145/3313831.3376590</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mechanic</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The functions and limitations of trust in the provision of medical care</article-title>
          <source>J Health Polit Policy Law</source>
          <year>1998</year>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>661</fpage>
          <lpage>686</lpage>
          <pub-id pub-id-type="doi">10.1215/03616878-23-4-661</pub-id>
          <pub-id pub-id-type="medline">9718518</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fukuyama</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <source>Trust: The Social Virtues and the Creation of Prosperity</source>
          <year>1996</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Simon and Schuster</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seligman</surname>
              <given-names>AB</given-names>
            </name>
          </person-group>
          <source>The Problem of Trust</source>
          <year>2000</year>
          <publisher-loc>United States</publisher-loc>
          <publisher-name>Princeton University Press</publisher-name>
          <fpage>240</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arrow</surname>
              <given-names>KJ</given-names>
            </name>
          </person-group>
          <article-title>Uncertainty and the welfare economics of medical care</article-title>
          <source>Uncertainty in Economics</source>
          <year>1978</year>
          <publisher-loc>USA</publisher-loc>
          <publisher-name>Elsevier</publisher-name>
          <fpage>345</fpage>
          <lpage>375</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Berg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dickhaut</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McCabe</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Trust, reciprocity, and social history</article-title>
          <source>Games and Economic Behavior</source>
          <year>1995</year>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>122</fpage>
          <lpage>142</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1006/game.1995.1027"/>
          </comment>
          <pub-id pub-id-type="doi">10.1006/game.1995.1027</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McKee</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schalkwyk</surname>
              <given-names>MCV</given-names>
            </name>
            <name name-style="western">
              <surname>Greenley</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Permanand</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Placing trust at the heart of health policy and systems</article-title>
          <source>Int J Health Policy Manag</source>
          <year>2024</year>
          <volume>13</volume>
          <fpage>8410</fpage>
          <pub-id pub-id-type="doi">10.34172/ijhpm.2024.8410</pub-id>
          <pub-id pub-id-type="medline">39099501</pub-id>
          <pub-id pub-id-type="pii">8410</pub-id>
          <pub-id pub-id-type="pmcid">PMC11270596</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kahneman</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <source>Thinking, Fast and Slow</source>
          <year>2011</year>
          <publisher-loc>New York, United States</publisher-loc>
          <publisher-name>Macmillan</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lewicki</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Brinsfield</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <source>Framing Trust: Trust as a Heuristic</source>
          <year>2011</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Fram Matters Perspect Negot Res Pract Commun</publisher-name>
          <fpage>110</fpage>
          <lpage>135</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Madsen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gregor</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Measuring human-computer trust</article-title>
          <year>2000</year>
          <conf-name>Proceedings of the 11th Australasian Conference on Information Systems</conf-name>
          <conf-date>Jan 10, 2000</conf-date>
          <conf-loc>Australia</conf-loc>
          <fpage>6</fpage>
          <lpage>8</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Essential properties and explanation effectiveness of explainable artificial intelligence in healthcare: a systematic review</article-title>
          <source>Heliyon</source>
          <year>2023</year>
          <volume>9</volume>
          <issue>5</issue>
          <fpage>e16110</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2405-8440(23)03317-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.heliyon.2023.e16110</pub-id>
          <pub-id pub-id-type="medline">37234618</pub-id>
          <pub-id pub-id-type="pii">S2405-8440(23)03317-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10205582</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nazar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Alam</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Yafi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Su'ud</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>A systematic review of human–computer interaction and explainable artificial intelligence in healthcare with artificial intelligence techniques</article-title>
          <source>IEEE Access</source>
          <year>2021</year>
          <volume>9</volume>
          <fpage>153316</fpage>
          <lpage>153348</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2021.3127881</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Antoniadi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Guendouz</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mazo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Becker</surname>
              <given-names>BA</given-names>
            </name>
            <name name-style="western">
              <surname>Mooney</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Current challenges and future opportunities for XAI in machine learning-based clinical decision support systems: a systematic review</article-title>
          <source>Applied Sciences</source>
          <year>2021</year>
          <volume>11</volume>
          <issue>11</issue>
          <fpage>5088</fpage>
          <pub-id pub-id-type="doi">10.3390/app11115088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giuste</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Naren</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Isgut</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sha</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gupte</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Explainable artificial intelligence methods in combating pandemics: a systematic review</article-title>
          <source>IEEE Rev Biomed Eng</source>
          <year>2023</year>
          <volume>16</volume>
          <fpage>5</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.1109/RBME.2022.3185953</pub-id>
          <pub-id pub-id-type="medline">35737637</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Page</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>McKenzie</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Bossuyt</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Boutron</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hoffmann</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Mulrow</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Shamseer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tetzlaff</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Akl</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Chou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Glanville</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Grimshaw</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Hróbjartsson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lalu</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Loder</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>Mayo-Wilson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>McDonald</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>McGuinness</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tricco</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Welch</surname>
              <given-names>VA</given-names>
            </name>
            <name name-style="western">
              <surname>Whiting</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title>
          <source>Int J Surg</source>
          <year>2021</year>
          <volume>88</volume>
          <fpage>105906</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1743-9191(21)00040-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ijsu.2021.105906</pub-id>
          <pub-id pub-id-type="medline">33789826</pub-id>
          <pub-id pub-id-type="pii">S1743-9191(21)00040-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="web">
          <source>Trust: The foundation of health systems</source>
          <access-date>2024-03-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://eurohealthobservatory.who.int/publications/i/trust-the-foundation-of-health-systems">https://eurohealthobservatory.who.int/publications/i/trust-the-foundation-of-health-systems</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cabitza</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Campagner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sconfienza</surname>
              <given-names>LM</given-names>
            </name>
          </person-group>
          <article-title>As if sand were stone. New concepts and metrics to probe the ground on which to build trustable AI</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>219</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://air.unimi.it/handle/2434/764720"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-020-01224-9</pub-id>
          <pub-id pub-id-type="medline">32917183</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-020-01224-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC7488864</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Manikandan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kose</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Satapathy</surname>
              <given-names>SC</given-names>
            </name>
          </person-group>
          <article-title>Doctor's dilemma: evaluating an explainable subtractive spatial lightweight convolutional neural network for brain tumor diagnosis</article-title>
          <source>ACM Trans. Multimedia Comput. Commun. Appl</source>
          <year>2021</year>
          <volume>17</volume>
          <issue>3s</issue>
          <fpage>1</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1145/3457187</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Naiseh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Thani</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Explainable recommendation: when design meets trust calibration</article-title>
          <source>World Wide Web</source>
          <year>2021</year>
          <volume>24</volume>
          <issue>5</issue>
          <fpage>1857</fpage>
          <lpage>1884</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34366701"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11280-021-00916-0</pub-id>
          <pub-id pub-id-type="medline">34366701</pub-id>
          <pub-id pub-id-type="pii">916</pub-id>
          <pub-id pub-id-type="pmcid">PMC8327305</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Naiseh</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Thani</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>How the different explanation classes impact trust calibration: the case of clinical decision support systems</article-title>
          <source>International Journal of Human-Computer Studies</source>
          <year>2023</year>
          <volume>169</volume>
          <fpage>102941</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2022.102941</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Evans</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Retzlaff</surname>
              <given-names>CO</given-names>
            </name>
            <name name-style="western">
              <surname>Geißler</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kargl</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Plass</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kiehl</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zerbe</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The explainability paradox: challenges for xAI in digital pathology</article-title>
          <source>Future Generation Computer Systems</source>
          <year>2022</year>
          <volume>133</volume>
          <fpage>281</fpage>
          <lpage>296</lpage>
          <pub-id pub-id-type="doi">10.1016/j.future.2022.03.009</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaur</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Uslu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Durresi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Trustworthy AI Explanations as an Interface in Medical Diagnostic Systems</source>
          <year>2022</year>
          <publisher-loc>Bloomington, Indiana</publisher-loc>
          <publisher-name>Indiana University System</publisher-name>
          <fpage>119</fpage>
          <lpage>130</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rainey</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>O'Regan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Matthew</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Skelton</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Woznitza</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Chu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>McConnell</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hughes</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bond</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Malamateniou</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McFadden</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>UK reporting radiographers' perceptions of AI in radiographic image interpretation - Current perspectives and future developments</article-title>
          <source>Radiography (Lond)</source>
          <year>2022</year>
          <volume>28</volume>
          <issue>4</issue>
          <fpage>881</fpage>
          <lpage>888</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1078-8174(22)00075-X"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.radi.2022.06.006</pub-id>
          <pub-id pub-id-type="medline">35780627</pub-id>
          <pub-id pub-id-type="pii">S1078-8174(22)00075-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gaube</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Suresh</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Raue</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lermer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Koch</surname>
              <given-names>TK</given-names>
            </name>
            <name name-style="western">
              <surname>Hudecek</surname>
              <given-names>MFC</given-names>
            </name>
            <name name-style="western">
              <surname>Ackery</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Grover</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Coughlin</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Frey</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kitamura</surname>
              <given-names>FC</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Colak</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Non-task expert physicians benefit from correct explainable AI advice when reviewing X-rays</article-title>
          <source>Sci Rep</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>1383</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-023-28633-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-023-28633-w</pub-id>
          <pub-id pub-id-type="medline">36697450</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-023-28633-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC9876883</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Martínez-Agüero</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Soguero-Ruiz</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Alonso-Moral</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Mora-Jiménez</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Álvarez-Rodríguez</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Marques</surname>
              <given-names>AG</given-names>
            </name>
          </person-group>
          <article-title>Interpretable clinical time-series modeling with intelligent feature selection for early prediction of antimicrobial multidrug resistance</article-title>
          <source>Future Generation Computer Systems</source>
          <year>2022</year>
          <volume>133</volume>
          <fpage>68</fpage>
          <lpage>83</lpage>
          <pub-id pub-id-type="doi">10.1016/j.future.2022.02.021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>ZC</given-names>
            </name>
            <name name-style="western">
              <surname>Kuo</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>TC</given-names>
            </name>
          </person-group>
          <article-title>Does AI explainability affect physicians' intention to use AI?</article-title>
          <source>Int J Med Inform</source>
          <year>2022</year>
          <volume>168</volume>
          <fpage>104884</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2022.104884</pub-id>
          <pub-id pub-id-type="medline">36228415</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(22)00198-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chiou</surname>
              <given-names>EK</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JD</given-names>
            </name>
          </person-group>
          <article-title>Trusting automation: designing for responsivity and resilience</article-title>
          <source>Hum Factors</source>
          <year>2023</year>
          <volume>65</volume>
          <issue>1</issue>
          <fpage>137</fpage>
          <lpage>165</lpage>
          <pub-id pub-id-type="doi">10.1177/00187208211009995</pub-id>
          <pub-id pub-id-type="medline">33906505</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Broussard</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <source>Artificial Unintelligence: How Computers Misunderstand the World</source>
          <year>2018</year>
          <publisher-loc>Cambridge, Massachusetts</publisher-loc>
          <publisher-name>The MIT Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in healthcare: past, present and future</article-title>
          <source>Stroke Vasc Neurol</source>
          <year>2017</year>
          <volume>2</volume>
          <issue>4</issue>
          <fpage>230</fpage>
          <lpage>243</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://svn.bmj.com/lookup/pmidlookup?view=long&#38;pmid=29507784"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/svn-2017-000101</pub-id>
          <pub-id pub-id-type="medline">29507784</pub-id>
          <pub-id pub-id-type="pii">svn-2017-000101</pub-id>
          <pub-id pub-id-type="pmcid">PMC5829945</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Matthews</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <source>LibGuides: Resources for Librarians: Web of Science Coverage Details</source>
          <access-date>2023-09-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://clarivate.libguides.com/librarianresources/coverage">https://clarivate.libguides.com/librarianresources/coverage</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kliegr</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Bahník</surname>
              <given-names>Š</given-names>
            </name>
            <name name-style="western">
              <surname>Fürnkranz</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A review of possible effects of cognitive biases on interpretation of rule-based machine learning models</article-title>
          <source>Artificial Intelligence</source>
          <year>2021</year>
          <volume>295</volume>
          <fpage>103458</fpage>
          <pub-id pub-id-type="doi">10.1016/j.artint.2021.103458</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sanchez-Martinez</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Camara</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Piella</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Cikes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>González-Ballester</surname>
              <given-names>MÁ</given-names>
            </name>
            <name name-style="western">
              <surname>Miron</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vellido</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Fraser</surname>
              <given-names>AG</given-names>
            </name>
            <name name-style="western">
              <surname>Bijnens</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Machine learning for clinical decision-making: challenges and opportunities in cardiovascular imaging</article-title>
          <source>Front Cardiovasc Med</source>
          <year>2021</year>
          <volume>8</volume>
          <fpage>765693</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35059445"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fcvm.2021.765693</pub-id>
          <pub-id pub-id-type="medline">35059445</pub-id>
          <pub-id pub-id-type="pmcid">PMC8764455</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thaler</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Sunstein</surname>
              <given-names>CR</given-names>
            </name>
          </person-group>
          <source>Nudge: Improving Decisions about Health, Wealth, and Happiness</source>
          <year>2009</year>
          <publisher-loc>Westminster, London</publisher-loc>
          <publisher-name>Penguin Publishing Group</publisher-name>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
