<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v5i1e72210</article-id><article-id pub-id-type="doi">10.2196/72210</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Exploring Clinician Perspectives on Artificial Intelligence in Primary Care: Qualitative Systematic Review and Meta-Synthesis</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Bogdanffy</surname><given-names>Robin</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Mundzic</surname><given-names>Alisa</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Nymberg</surname><given-names>Peter</given-names></name><degrees>PhD, RN</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Sundemo</surname><given-names>David</given-names></name><degrees>MD, PhD</degrees><xref 
ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Moberg</surname><given-names>Anna</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wikberg</surname><given-names>Carl</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gunnarsson</surname><given-names>Ronny Kent</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff6">6</xref><xref ref-type="aff" rid="aff7">7</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wid&#x00E9;n</surname><given-names>Jonathan</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Sundvall</surname><given-names>P&#x00E4;r-Daniel</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Entezarjou</surname><given-names>Artin</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>General Practice/Family Medicine, School of Public Health and Community Medicine, Institute of Medicine, Sahlgrenska Academy, University of Gothenburg</institution><addr-line>Huvudbyggnad Vasaparken, Universitetsplatsen 1</addr-line><addr-line>Gothenburg</addr-line><country>Sweden</country></aff><aff id="aff2"><institution>Center for Primary Health Care Research, Department of Clinical Sciences, Malm&#x00F6;, Lund 
University</institution><addr-line>Malm&#x00F6;</addr-line><country>Sweden</country></aff><aff id="aff3"><institution>University Clinic Primary Care Sk&#x00E5;ne, Region Sk&#x00E5;ne</institution><addr-line>Malm&#x00F6;</addr-line><country>Sweden</country></aff><aff id="aff4"><institution>Center for Digital Health, Sahlgrenska University Hospital, Region V&#x00E4;stra G&#x00F6;taland</institution><addr-line>M&#x00F6;lndal</addr-line><country>Sweden</country></aff><aff id="aff5"><institution>Department of Health, Medicine and Caring Sciences, Faculty of Medicine and Health Sciences, Link&#x00F6;ping University</institution><addr-line>Link&#x00F6;ping</addr-line><country>Sweden</country></aff><aff id="aff6"><institution>Research, Education, Development &#x0026; Innovation, Primary Health Care, Region V&#x00E4;stra G&#x00F6;taland</institution><addr-line>Gothenburg</addr-line><country>Sweden</country></aff><aff id="aff7"><institution>College of Medicine and Dentistry, James Cook University</institution><addr-line>Cairns</addr-line><country>Australia</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Emam</surname><given-names>Khaled El</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Rodriguez-Suarez</surname><given-names>Claudio-Alberto</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Kbaier</surname><given-names>Dhouha</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Robin Bogdanffy, MD, General Practice/Family Medicine, School of Public Health and Community Medicine, Institute of Medicine, Sahlgrenska Academy, University of Gothenburg, Huvudbyggnad Vasaparken, Universitetsplatsen 1, Gothenburg, 40530, Sweden, 46 317860000; <email>robin.bogdanffy@gu.se</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date 
pub-type="epub"><day>5</day><month>2</month><year>2026</year></pub-date><volume>5</volume><elocation-id>e72210</elocation-id><history><date date-type="received"><day>06</day><month>02</month><year>2025</year></date><date date-type="rev-recd"><day>10</day><month>01</month><year>2026</year></date><date date-type="accepted"><day>10</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Robin Bogdanffy, Alisa Mundzic, Peter Nymberg, David Sundemo, Anna Moberg, Carl Wikberg, Ronny Kent Gunnarsson, Jonathan Wid&#x00E9;n, P&#x00E4;r-Daniel Sundvall, Artin Entezarjou. Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 5.2.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2026/1/e72210"/><abstract><sec><title>Background</title><p>Recent advances have highlighted the potential of artificial intelligence (AI) systems to assist clinicians with administrative and clinical tasks, but concerns regarding biases, lack of regulation, and potential technical issues pose significant challenges. 
The lack of a clear definition of AI, combined with limited focus on qualitative research exploring clinicians' perspectives, has limited the understanding of perspectives on AI in primary health care settings.</p></sec><sec><title>Objective</title><p>This review aims to synthesize current qualitative research on the perspectives of clinicians on AI in primary care settings.</p></sec><sec sec-type="methods"><title>Methods</title><p>A systematic search was conducted in MEDLINE (PubMed), Scopus, Web of Science, and CINAHL (EBSCOhost) databases for publications from inception to February 5, 2024. The search strategy was designed using the Sample, Phenomenon of Interest, Design, Evaluation, and Research type (SPIDER) framework. Studies were eligible if they were published in English, peer-reviewed, and provided qualitative analyses of clinician perspectives on AI in primary health care. Studies were excluded if they were gray literature, used questionnaires, surveys, or similar methods for data collection, or if the perspectives of clinicians were not distinguishable from those of nonclinicians. A qualitative systematic review and thematic synthesis were performed. The Grading of Recommendations Assessment, Development and Evaluation-Confidence in Evidence from Reviews of Qualitative Research (GRADE-CERQual) approach was used to assess confidence in the findings. The CASP (Critical Appraisal Skills Program) checklist for qualitative research was used for risk-of-bias and quality appraisal.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 1492 records were identified, of which 13 studies from 6 countries were included, representing qualitative data from 238 primary care physicians, nurses, physiotherapists, and other health care professionals providing direct patient care. 
Eight descriptive themes were identified and synthesized into 3 analytical themes using thematic synthesis: (1) the human-machine relationship, describing clinicians&#x2019; thoughts on AI assistance in administration and clinical work, interactions between clinicians, patients, and AI, and resistance and skepticism toward AI; (2) the technologically enhanced clinic, highlighting the effects of AI on the workplace, fear of errors, and desired features; and (3) the societal impact of AI, reflecting concerns about data privacy, medicolegal liability, and bias. GRADE-CERQual assessment rated confidence as high in 15 findings, moderate in 5 findings, and low in 1 finding.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Clinicians view AI as a technology that can both enhance and complicate primary health care. While AI can provide substantial support, its integration into health care requires careful consideration of ethical implications, technical reliability, and the maintenance of human oversight. Interpretation is constrained by heterogeneity in qualitative methods and the diversity of AI technologies examined across studies. 
More in-depth qualitative research on the effects of AI on clinicians&#x2019; careers and autonomy could prove helpful for the future development of AI systems.</p></sec><sec><title>Trial Registration</title><p>PROSPERO CRD42024505209; https://www.crd.york.ac.uk/PROSPERO/view/CRD42024505209</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>large language models</kwd><kwd>natural language processing</kwd><kwd>generative artificial intelligence</kwd><kwd>primary health care</kwd><kwd>attitude of health personnel</kwd><kwd>systematic reviews as topic</kwd><kwd>qualitative research</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Health care systems worldwide are increasingly strained, partly due to aging populations and insufficient resources, and there is increased demand for accessibility, medical quality, and economic efficiency [<xref ref-type="bibr" rid="ref1">1</xref>]. Primary care is regarded as a cornerstone in health care systems across many regions of the world [<xref ref-type="bibr" rid="ref2">2</xref>], and primary care clinicians&#x2019; job satisfaction is considered essential for many health care systems [<xref ref-type="bibr" rid="ref3">3</xref>]. Recent studies have demonstrated the potential of artificial intelligence (AI) tools and systems to reduce burnout and increase the efficiency of health care professionals [<xref ref-type="bibr" rid="ref4">4</xref>], as well as to improve diagnostic accuracy and patient care [<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>AI is an emerging technology with a broad range of applications [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref8">8</xref>]. 
However, there is still no consensus on a general definition of AI, which presents an obstacle to investigating people&#x2019;s perspectives [<xref ref-type="bibr" rid="ref9">9</xref>].</p><p>Recent advances in AI have led to increased health care&#x2013;related AI use and research [<xref ref-type="bibr" rid="ref10">10</xref>]. Previous reports have indicated that the main applications of AI in primary health care have been data extraction and processing [<xref ref-type="bibr" rid="ref11">11</xref>], reducing administrative burden [<xref ref-type="bibr" rid="ref12">12</xref>], and assisting physicians in diagnosing, determining a prognosis, and choosing a treatment [<xref ref-type="bibr" rid="ref13">13</xref>]. Current large language models (LLMs) have started to play a more prominent role in health care, and new applications are frequently identified [<xref ref-type="bibr" rid="ref14">14</xref>]. Several LLM products, including Chat Generative Pretrained Transformer (GPT), have demonstrated the capability of medical reasoning and have performed well on medical licensing exams [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Moreover, LLMs may improve communication between health care professionals and patients through text simplification [<xref ref-type="bibr" rid="ref17">17</xref>].</p><p>Previous research suggests concerns among clinicians regarding the use of AI in health care, such as demographic biases, insufficient regulation, lack of trust in AI systems [<xref ref-type="bibr" rid="ref18">18</xref>], and automation bias [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>While there seems to be a lack of systematic synthesis on clinicians&#x2019; perspectives on AI in primary health care, a scoping review conducted in 2022 on perceptions and needs of AI in health care identified few studies within primary health care. End-user and stakeholder opinions are essential for future implementation and development. 
Since research on AI in primary care is limited and results are varied, perceptions of the use of AI in this domain are not fully understood [<xref ref-type="bibr" rid="ref7">7</xref>].</p></sec><sec id="s1-2"><title>Definitions</title><sec id="s1-2-1"><title>Definition of Clinicians</title><p>In this review, we refer to health care professionals who provide direct patient care (eg, physicians, nurses, physiotherapists) as clinicians.</p></sec><sec id="s1-2-2"><title>AI Definitions</title><p>Different AI systems vary in their levels of autonomy and adaptiveness after deployment [<xref ref-type="bibr" rid="ref20">20</xref>]. For broad inclusion, this review included any AI system or concept specified by the study authors as AI. This includes LLMs, generative AI (GAI), natural language processing (NLP), and clinical decision support systems (CDSS). The definitions of these model types are complex, and overlap exists; LLMs are language models trained on large amounts of data and are created to process and generate human language based on prompts created by the user, sometimes operating as GAI or as the core of a CDSS [<xref ref-type="bibr" rid="ref21">21</xref>]. GAI refers to AI which is capable of generating content, such as text, images, or audio, some of which are based on LLMs. Current GAI system examples are GPT-4, Copilot, and DALL-E 2 [<xref ref-type="bibr" rid="ref22">22</xref>]. The term NLP encompasses computational techniques designed for the automatic analysis and representation of language [<xref ref-type="bibr" rid="ref23">23</xref>]. A CDSS is an information system that generates specific clinical recommendations through certain software-based algorithms [<xref ref-type="bibr" rid="ref24">24</xref>]. 
An illustration of key concepts of AI and machine learning (ML) is provided in <xref ref-type="fig" rid="figure1">Figure 1</xref> [<xref ref-type="bibr" rid="ref25">25</xref>].</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Conceptual hierarchy of AI domains. AI: artificial intelligence; DL: deep learning; GenAI: generative artificial intelligence; LLM: large language model; ML: machine learning; NLP: natural language processing; NN: neural network.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e72210_fig01.png"/></fig></sec></sec><sec id="s1-3"><title>Objective</title><p>The aim of this systematic review is to synthesize the current qualitative research on clinicians&#x2019; perspectives on AI in primary care settings.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design</title><p>We performed a systematic review and metasynthesis in accordance with the Cochrane Qualitative and Implementation Methods Group [<xref ref-type="bibr" rid="ref26">26</xref>]. The review was reported according to the Enhanced Transparency in Reporting the Synthesis of Qualitative Research (ENTREQ) statement [<xref ref-type="bibr" rid="ref27">27</xref>] (<xref ref-type="supplementary-material" rid="app5">Checklist 1</xref>) and Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) guidelines [<xref ref-type="bibr" rid="ref28">28</xref>] (<xref ref-type="supplementary-material" rid="app6">Checklist 2</xref>). The study protocol was registered with the International Prospective Register for Systematic Reviews (PROSPERO; CRD42024505209) [<xref ref-type="bibr" rid="ref29">29</xref>] before conducting the review. 
This study employed a methodology for a systematic review of qualitative studies, in which the authors conducted a secondary qualitative synthesis of published clinician quotes and primary authors&#x2019; interpretations from the reviewed studies, allowing for deeper exploration of underlying patterns and themes.</p></sec><sec id="s2-2"><title>Search Strategy</title><sec id="s2-2-1"><title>Overview</title><p>The search strategy was developed using the Sample, Phenomenon of Interest, Design, Evaluation, and Research type (SPIDER) framework [<xref ref-type="bibr" rid="ref30">30</xref>]: clinicians in primary care (Sample); their perspectives and experiences regarding AI (Phenomenon of Interest); explored through qualitative study designs (Design); focusing on evaluations of experiences, attitudes, perspectives, and views (Evaluation), within qualitative and mixed methods research (Research type). Search strings were designed by the author team and reviewed by a health sciences librarian at the Gothenburg University Library. Broader terms for &#x201C;primary health care,&#x201D; &#x201C;artificial intelligence,&#x201D; and &#x201C;perspectives&#x201D; were combined. Controlled vocabulary and free-text terms were used (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). A systematic search was conducted in MEDLINE (PubMed), Scopus, Web of Science, and CINAHL (EBSCOhost) databases for publications from inception to February 5, 2024. Backward citation searching of the reference lists of the included articles was also performed. Search strings were modified according to the requirements of each database. All searches were performed independently by authors RB and AM and reviewed by author AE. 
Search documentation is presented in accordance with the Preferred Reporting Items for Systematic Reviews and Meta-Analyses Search (PRISMA-S) checklist [<xref ref-type="bibr" rid="ref31">31</xref>] (<xref ref-type="supplementary-material" rid="app7">Checklist 3</xref>).</p></sec><sec id="s2-2-2"><title>Inclusion Criteria</title><p>Studies were included if they were conducted in a primary health care setting, involved clinicians such as doctors, nurses, physiotherapists, or other health care professionals providing direct patient care, and explored any perspectives on AI in primary health care. For the purposes of this review, studies were considered to be conducted in a primary health care setting if participants were recruited via primary care services, had documented interaction with primary care, or if the study context clearly reflected a primary care environment such as general practice or family medicine. Only qualitative and mixed methods studies published in English in peer-reviewed scientific journals were eligible for inclusion.</p></sec><sec id="s2-2-3"><title>Exclusion Criteria</title><p>Studies were excluded if they lacked sufficient qualitative depth, such as those using only questionnaires, surveys, or similar methods for data collection. We also excluded studies in which qualitative data on clinicians&#x2019; perspectives were not clearly distinguishable from those of nonclinicians, as well as gray literature and unpublished materials.</p></sec></sec><sec id="s2-3"><title>Study Selection</title><p>Authors RB and AM imported the search results into Rayyan (citation manager) [<xref ref-type="bibr" rid="ref32">32</xref>], where duplicates were removed. The authors independently screened titles and abstracts of the remaining articles against the inclusion and exclusion criteria. Any disagreements were discussed, and if consensus was not reached, a third author (AE) was consulted for a final decision. 
We included articles claiming to evaluate AI technology based on the authors&#x2019; definition of AI, as described in the &#x201C;Introduction.&#x201D;</p></sec><sec id="s2-4"><title>Critical Appraisal</title><p>Authors RB and AM independently conducted critical appraisal using the Critical Appraisal Skills Program (CASP) checklist for qualitative research [<xref ref-type="bibr" rid="ref33">33</xref>]. Disagreements were discussed until a consensus was reached or author AE was consulted for a final decision.</p></sec><sec id="s2-5"><title>Data Analysis and Synthesis</title><p>Data were extracted from the Results section of the included articles and their supplementary material. Participant quotes and authors&#x2019; findings were analyzed independently by RB and AM to generate descriptive themes using thematic analysis according to the Braun and Clarke method [<xref ref-type="bibr" rid="ref34">34</xref>]. This involved several steps through a primarily inductive analytic process. First, the authors familiarized themselves with the extracted data by reading it several times. RB then developed codes using line-by-line coding of words or sentences considered meaningful, using the NVivo software [<xref ref-type="bibr" rid="ref35">35</xref>]. Data extraction and coding were performed in 2 stages. The first stage involved articles solely containing primary care clinician perspectives, and the second stage involved articles containing perspectives of both primary care clinicians and nonclinical health care professionals. Qualitative data with perspectives other than that of clinicians was not coded. Codes were discussed by both authors until an agreement was reached, whereafter, they were exported to a Microsoft Excel spreadsheet. RB then proceeded to generate descriptive themes by grouping codes. The alignment of codes to certain themes was discussed, and the descriptive themes were refined. 
Thematic synthesis, according to the Thomas and Harden method, was employed to develop higher-order analytical themes. It is a well-suited method for exploring qualitative data such as perspectives or sentiments [<xref ref-type="bibr" rid="ref36">36</xref>]. Thematic synthesis was accomplished through a discussion between both authors, during which the analytical themes were developed and named (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). No new themes emerged from coding the articles with mixed perspectives. This method was chosen due to its ability to identify recurring themes and patterns across multiple studies, enhancing the breadth of the analysis.</p></sec><sec id="s2-6"><title>Assessment of Confidence in the Evidence</title><p>Confidence in each synthesized finding was assessed using the Grading of Recommendations Assessment, Development and Evaluation-Confidence in Evidence from Reviews of Qualitative Research (GRADE-CERQual) approach. The GRADE-CERQual approach was chosen as it explicitly addresses qualitative evidence synthesis, allowing systematic and transparent assessments of the confidence in each thematic finding. Authors RB and AM independently evaluated each finding based on 4 components: methodological limitations, coherence, adequacy, and relevance. Each component was assessed as having no or very minor, minor, moderate, or serious concerns. Discrepancies were discussed, and if agreement was not reached, author AE was consulted for a final decision. Each finding began with an initial rating of &#x201C;high confidence&#x201D;. Confidence levels were then potentially downgraded to moderate, low, or very low based on the severity and number of concerns present in each component. 
Typically, one level of downgrading (eg, from high to moderate confidence) was applied when moderate concerns were identified in one component combined with minor concerns in other components, and two levels (eg, from high to low confidence) were applied when serious concerns or multiple moderate concerns were present. When concerns were minor or very minor, no downgrading was performed [<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref42">42</xref>].</p></sec><sec id="s2-7"><title>Ethical Considerations</title><p>Because this systematic review used only pre-existing data, ethical approval was not required.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Search Results and Selection</title><p>The final search generated 1492 results, and 415/1492 (27.8%) duplicates were excluded. The remaining 1077/1492 (72.2%) articles were screened by title and abstract, and 54/1077 (5%) articles were retrieved in full text and evaluated, of which 42/54 (77.8%) were excluded based on the exclusion criteria. Finally, 12/54 (22.2%) articles were included from the screening, and 1 additional article was identified from the reference lists of the previously included articles, resulting in a total of 13 studies [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref55">55</xref>] (<xref ref-type="fig" rid="figure2">Figure 2</xref>).</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) flow diagram of study selection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e72210_fig02.png"/></fig><p>The 13 included studies were conducted in 6 different countries. 
Australia was the most frequent location with 4/13 (30.8%) studies [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref49">49</xref>], followed by Canada with 3/13 (23.1%) studies [<xref ref-type="bibr" rid="ref51">51</xref>-<xref ref-type="bibr" rid="ref53">53</xref>] and the United States with 3/13 (23.1%) studies [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Sweden [<xref ref-type="bibr" rid="ref55">55</xref>], the Netherlands [<xref ref-type="bibr" rid="ref54">54</xref>], and Germany [<xref ref-type="bibr" rid="ref48">48</xref>] each contributed 1/13 (7.7%) study. Many studies used semistructured interviews for data collection (6/13, 46.2%) [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Mixed methods were used in 3/13 (23.1%) studies [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref50">50</xref>], focus groups in 2/13 (15.4%) studies [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref54">54</xref>], deliberative dialogue in 1/13 (7.7%) study [<xref ref-type="bibr" rid="ref52">52</xref>], and a co-design workshop in 1/13 (7.7%) study [<xref ref-type="bibr" rid="ref46">46</xref>]. 
Characteristics of the included studies are presented in <xref ref-type="table" rid="table1">Table 1</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Characteristics of the included studies.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Country</td><td align="left" valign="bottom">Method</td><td align="left" valign="bottom">Characteristics of participants</td><td align="left" valign="bottom">Occupation</td><td align="left" valign="bottom">Type of AI</td><td align="left" valign="bottom">Identified themes</td></tr></thead><tbody><tr><td align="left" valign="top">Davis et al [<xref ref-type="bibr" rid="ref43">43</xref>]</td><td align="left" valign="top">USA</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">n=10<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">Mixed<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">Machine learning</td><td align="left" valign="top">Acceptability, Clinical Utility, Privacy, Data and Evidence, Clarification/Confusion, Communication, Patient, Family and Provider Characteristics and Experiences, Inner Setting, Outer Setting, Suggestions</td></tr><tr><td align="left" valign="top">Litvin et al [<xref ref-type="bibr" rid="ref44">44</xref>]</td><td align="left" valign="top">USA</td><td align="left" valign="top">Mixed methods</td><td align="left" valign="top">n=39<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">Mixed<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">CDSS<sup><xref ref-type="table-fn" 
rid="table1fn3">c</xref></sup></td><td align="left" valign="top">Provider factors related to CDSS<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup> adoption, Patient factors related to CDSS adoption, Technical factors related to CDSS adoption, Organizational factors related to CDSS adoption</td></tr><tr><td align="left" valign="top">Navarro et al [<xref ref-type="bibr" rid="ref45">45</xref>]</td><td align="left" valign="top">Australia</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">n=10<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender-men: n=7, women: n=3</td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">Natural language processing</td><td align="left" valign="top">Doctor-AI<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup> collaboration, Desired features, Concerns and challenges, Consultation of the future</td></tr><tr><td align="left" valign="top">Kocaballi et al [<xref ref-type="bibr" rid="ref46">46</xref>]</td><td align="left" valign="top">Australia</td><td align="left" valign="top">Co-design workshop</td><td align="left" valign="top">n=16<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender-men: n=10, women: n=6</td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">Generative AI</td><td align="left" valign="top">Professional autonomy, Human-AI collaboration, New models of care</td></tr><tr><td align="left" valign="top">Shibl et al [<xref ref-type="bibr" rid="ref47">47</xref>]</td><td align="left" valign="top">Australia</td><td align="left" valign="top">Mixed methods</td><td align="left" valign="top">n=37<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender-men: n=24, women: n=13</td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">CDSS</td><td align="left" valign="top">Usefulness, 
Facilitating conditions, Ease of use, Social influence, Trust in the knowledge base, Involvement, Moderating variables</td></tr><tr><td align="left" valign="top">Buck et al [<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">Germany</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">n=18<break/>Age (y): 34-70<break/>Gender-men: n=9, women: n=9</td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">CDSS</td><td align="left" valign="top">Concerns, Expectations, Environmental influences, Individual characteristics, Minimum requirements of AI-enabled systems</td></tr><tr><td align="left" valign="top">Ahearn et al [<xref ref-type="bibr" rid="ref49">49</xref>]</td><td align="left" valign="top">Australia</td><td align="left" valign="top">Focus groups</td><td align="left" valign="top">n=22<break/>Gender-men: n=15, women: n=7</td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">CDSS</td><td align="left" valign="top">Reaction to prompts, Concerns and potential problems, Effects on prescribing behavior, Need for training, Helpful features of decision support systems, Suggested improvements, Attitudes to evidence-based guidelines</td></tr><tr><td align="left" valign="top">Allen et al [<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">USA</td><td align="left" valign="top">Mixed methods</td><td align="left" valign="top">n=15<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">Unspecified AI</td><td align="left" valign="top">Concerns regarding technology, Concerns regarding people and processes</td></tr><tr><td align="left" valign="top">Nash et al [<xref ref-type="bibr" rid="ref51">51</xref>]</td><td align="left" 
valign="top">Canada</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">n=10<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">Mixed<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">Unspecified AI</td><td align="left" valign="top">Context of Health Care Setting, Knowledge, Foundation of Trust: Accuracy, Experience, and Openness, Internal and External Influences, Anticipated Impact of AI</td></tr><tr><td align="left" valign="top">Upshaw et al [<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">Canada</td><td align="left" valign="top">Deliberative dialogue</td><td align="left" valign="top">n=21<break/>Age (y): 28-64<break/>Gender-men: n=12, women: n=9</td><td align="left" valign="top">Mixed<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">CDSS</td><td align="left" valign="top">Priority applications of AI in primary care, Impact of AI on primary care provider roles, Considerations for provider training in AI</td></tr><tr><td align="left" valign="top">Libon et al [<xref ref-type="bibr" rid="ref53">53</xref>]</td><td align="left" valign="top">Canada</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">n=8<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">Mixed<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">Unspecified AI</td><td align="left" valign="top">Provider satisfaction, Difficulties with implementation, Impact on patient care</td></tr><tr><td align="left" valign="top">Sangers et al [<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" 
valign="top">Netherlands</td><td align="left" valign="top">Focus groups</td><td align="left" valign="top">n=17<break/>Age (y): 31-62<break/>Gender-men: n=7, women: n=10</td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">Unspecified AI</td><td align="left" valign="top">Perceived Benefits, Perceived Barriers, Preconditions for Implementation</td></tr><tr><td align="left" valign="top">Helenason et al [<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">Sweden</td><td align="left" valign="top">Semistructured interviews</td><td align="left" valign="top">n=15<break/>Age<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup><break/>Gender<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">General practitioners</td><td align="left" valign="top">CDSS</td><td align="left" valign="top">Trust, Usability and User Experience, Clinical Context</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Age/Gender: undisclosed or not distinguishable from nonclinicians.</p></fn><fn id="table1fn2"><p><sup>b</sup>Mixed: doctors, nurses, physiotherapists, or other health care professionals providing direct patient care.</p></fn><fn id="table1fn3"><p><sup>c</sup>CDSS: clinical decision support system.</p></fn><fn id="table1fn4"><p><sup>d</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Critical Appraisal</title><p>The critical appraisal using the CASP qualitative checklist indicated that all included studies had clear research aims, appropriate qualitative methodologies, and well-reported findings. Several studies lacked sufficient transparency in ethical considerations (<xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>).</p></sec><sec id="s3-3"><title>Findings</title><p>The thematic analysis identified eight descriptive themes. 
These descriptive themes represent recurring elements identified across studies and served as the foundation for the synthesis of broader analytical themes. Thematic synthesis resulted in three analytical themes: the human-machine relationship, the technologically enhanced clinic, and the societal impact of AI. All themes are presented in <xref ref-type="table" rid="table2">Table 2</xref>. There was a wide time span across the included studies. Earlier studies, conducted between 2003 and 2013, explored perspectives on less advanced systems, such as CDSSs not based on ML [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref49">49</xref>], whereas later studies included more advanced AI systems, such as GAI and NLP [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref54">54</xref>], or a CDSS based on ML [<xref ref-type="bibr" rid="ref55">55</xref>].</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Analytical themes and descriptive themes.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">The human-machine relationship</td><td align="left" valign="bottom">The technologically enhanced clinic</td><td align="left" valign="bottom">The societal impact of AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td></tr></thead><tbody><tr><td align="left" valign="top">Interaction with AI</td><td align="left" valign="top">Workplace changes</td><td align="left" valign="top">Bias</td></tr><tr><td align="left" valign="top">Resistance to AI</td><td align="left" valign="top">Technological concerns</td><td align="left" valign="top">Data security, privacy, and legal implications</td></tr><tr><td align="left" valign="top"/><td 
align="left" valign="top">Clinical impact</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Desired features</td><td align="left" valign="top"/></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-4"><title>Theme 1: The Human-Machine Relationship</title><sec id="s3-4-1"><title>Interaction With AI</title><p>The relationship between humans and AI was approached from different perspectives, and several clinicians perceived AI as an assistant that could alleviate the burden of specific tasks [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref51">51</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. Some clinicians suggested that AI may have a negative impact on the clinician-patient relationship through the lack of a human connection [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. However, some findings suggested increased clinician empathy with AI assistance [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>] or even facilitation of communication between a clinician and a patient [<xref ref-type="bibr" rid="ref45">45</xref>]. Several clinicians wished for AI to enhance rather than replace the relationship between clinician and patient [<xref ref-type="bibr" rid="ref52">52</xref>]. The idea of working with the AI algorithm to present information and decisions to the patient was also appreciated [<xref ref-type="bibr" rid="ref43">43</xref>]. It was also believed that the use of AI could increase time spent with patients rather than other tasks [<xref ref-type="bibr" rid="ref45">45</xref>]:</p><disp-quote><p>Yes, just taking my hands off the computer, getting my eyes off the screen, so that I can be spending time with the patient. 
And also saving me the documentation time, because you can either spend more time with the patient or see more patients.</p><attrib>GP</attrib></disp-quote><p>Some clinicians believed that a patient&#x2019;s confidence in the clinician would increase by using AI [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>], and others believed that AI would empower clinicians to be more confident in their practice [<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. Several clinicians thought that AI could be valuable in educating clinicians or providing new clinical insights [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. However, there were concerns that by introducing AI systems to inexperienced clinicians, there could be a risk of declining proficiency due to the automation of tasks [<xref ref-type="bibr" rid="ref52">52</xref>]. Clinicians highlighted that AI could complement human medical practitioners with nonhuman traits, such as the ability to not get tired, thus retaining its clinical accuracy [<xref ref-type="bibr" rid="ref48">48</xref>]. Many clinicians believed that AI could improve the clinical consultation by shifting the clinician&#x2019;s focus toward the patient [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. Clinicians in one study believed that AI systems currently focus more on task efficiency than on improving patient care [<xref ref-type="bibr" rid="ref46">46</xref>].</p><p>Some clinicians ultimately wished to retain control over the AI system, keeping the clinician in charge [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. 
Whether clinicians wanted to have a deeper understanding of the programming behind an AI system differed, with some clinicians having a desire for a more profound knowledge [<xref ref-type="bibr" rid="ref48">48</xref>] and others believing that it was not required [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Clinicians&#x2019; trust in AI systems was discussed with conflicting opinions, where some believed that AI could ultimately be trusted, and some did not [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Trust in the AI system would increase if it were scientifically proven to work or validated by other health care professionals, according to some clinicians [<xref ref-type="bibr" rid="ref55">55</xref>]. The topic was further investigated in discussions surrounding trust in the AI creators, where clinicians expressed that they would trust the system if it were based on a well-known physician or author. They voiced no concern regarding how the system was developed or who the software developers were [<xref ref-type="bibr" rid="ref47">47</xref>].</p></sec><sec id="s3-4-2"><title>Resistance to AI</title><p>Several clinicians voiced concerns regarding AI replacing medical staff or jobs in other sectors [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Some expected doctors to eventually assist AI [<xref ref-type="bibr" rid="ref46">46</xref>]:</p><disp-quote><p>I think eventually the doctors will be the assistant doctors &#x2026; Doctors will assist artificial intelligence what to do &#x2026; eventually &#x2026; we'll be helping it. I think we'll be assistant &#x2026; Because they'll be doing everything. It will be just saying, yes, no, yes, no. 
Say supervision, but we'll be assisting.</p><attrib>GP</attrib></disp-quote><p>Other clinicians dismissed such fears [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. It was also thought that clinicians&#x2019; gut feelings could not be replaced by AI [<xref ref-type="bibr" rid="ref51">51</xref>]. There was also resistance or skepticism toward AI systems. Several clinicians voiced potential negative effects on their workflows, stating that they perceived AI to cause increased time expenditure [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Other clinicians believed that there was no change in time expenditure [<xref ref-type="bibr" rid="ref44">44</xref>]. It was also believed that decreased time expenditure could have adverse effects due to patients becoming accustomed to the increased speed and effectiveness of certain processes [<xref ref-type="bibr" rid="ref50">50</xref>].</p><p>Previous negative experiences with the introduction of electronic health records could influence skepticism toward AI [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. Discussions about factors contributing to AI resistance emerged, and some clinicians concluded that this could be caused by age, personal interests, or alignment with accepting new technology [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Other barriers, such as being limited by time or resources, were also mentioned [<xref ref-type="bibr" rid="ref43">43</xref>]. 
Some clinicians were worried about patient safety due to concerns about AI safety and algorithmic bias [<xref ref-type="bibr" rid="ref52">52</xref>].</p></sec></sec><sec id="s3-5"><title>Theme 2: The Technologically Enhanced Clinic</title><sec id="s3-5-1"><title>Workplace Changes</title><p>There were different clinician perspectives on automating certain tasks, the impact on workload, and integrating a new system in a workplace [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. Clinicians expressed a belief that using AI systems could potentially save time through automation of administrative tasks or clinical decision support [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Some voiced that this was the foremost reason for using AI [<xref ref-type="bibr" rid="ref45">45</xref>]:</p><disp-quote><p>I'd be confident that it would save me time but not replace me thinking, which is not the aim, for me it&#x2019;s the saving time.</p><attrib>GP</attrib></disp-quote><p>Not all clinicians agreed on this topic. Some thought that AI would increase their workload by complicating their tasks [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref50">50</xref>] or disrupting their workflow and disturbing their train of thought [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Interference with the clinician&#x2019;s decision-making process by the suggestion of unnecessary tests was highlighted as a negative impact on workflow [<xref ref-type="bibr" rid="ref43">43</xref>]. Opinions were mixed regarding whether cost was an important factor for implementation. 
Some believed that cost could be a factor in system acceptance [<xref ref-type="bibr" rid="ref48">48</xref>], whereas others did not [<xref ref-type="bibr" rid="ref47">47</xref>]. Some thought that there was probably a positive cost-benefit for clinics using AI systems [<xref ref-type="bibr" rid="ref54">54</xref>]. Some clinicians wished that the focus of AI systems should be to assist patient care and not strictly for financial gain [<xref ref-type="bibr" rid="ref47">47</xref>]. Assisting clinicians in primary care centers was thought especially important in countering physician burnout [<xref ref-type="bibr" rid="ref51">51</xref>]. Several studies voiced concerns regarding integrating AI systems into already established working environments. Many clinicians wished for seamless integration of AI systems into existing systems [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>], whereas some wished for the systems to be completely separated [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. The need for established policies and routines prior to AI system adoption was also voiced [<xref ref-type="bibr" rid="ref55">55</xref>].</p></sec><sec id="s3-5-2"><title>Technological Concerns</title><p>Clinicians voiced several technological concerns, including the risk of technical issues, issues with the AI system itself, or user errors [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Several clinicians had experience with CDSS providing inaccurate information [<xref ref-type="bibr" rid="ref47">47</xref>] or leaving out important information [<xref ref-type="bibr" rid="ref49">49</xref>]. 
Many clinicians were concerned with the risk of AI producing erroneous information or having a low accuracy [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. The AI&#x2019;s ability to decide whether a piece of information was relevant was also a concern [<xref ref-type="bibr" rid="ref45">45</xref>].</p><p>Simultaneously, there was also a fear of user error, meaning clinicians were uncomfortable using the system and potentially causing errors [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. Several clinicians further explored this subject, mentioning that they would fear not knowing how to use the system in front of patients [<xref ref-type="bibr" rid="ref47">47</xref>]. The reason for this was not further specified, but other clinicians deemed using an AI system nonintuitive [<xref ref-type="bibr" rid="ref44">44</xref>]. Another technical concern expressed by some clinicians was the possibility of complete system failure [<xref ref-type="bibr" rid="ref48">48</xref>]:</p><disp-quote><p>If my system goes down, my AI is on standby, then sorry, I can&#x2019;t diagnose, my system strikes out. That is why it&#x2019;s nice to be able to write down with a pen on paper what a patient has and has received.</p><attrib>GP</attrib></disp-quote><p>Accessibility was approached from different perspectives. There were wishes for AI systems to be easily accessible from the electronic health record [<xref ref-type="bibr" rid="ref43">43</xref>]. Clinicians also expressed concern that some patient groups could be less likely to have access to the technology needed for AI interaction [<xref ref-type="bibr" rid="ref43">43</xref>]. 
Likewise, using AI as triage could be inaccessible for some populations [<xref ref-type="bibr" rid="ref52">52</xref>]. Computer and AI training for clinicians was generally seen as an important factor for implementation.</p><p>Clinicians from different studies expressed the need for specific training [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>] and being regularly informed about AI technology [<xref ref-type="bibr" rid="ref48">48</xref>]. Some believed there was no need for training, as they had been using an AI system without prior professional training [<xref ref-type="bibr" rid="ref47">47</xref>]. The growing use of similar algorithms or programs, such as CDSS or other AI systems, was generally considered beneficial for implementation [<xref ref-type="bibr" rid="ref43">43</xref>]. Even though several technical concerns were voiced, clinicians in one study remarked that technological advancements in medicine are also necessary. It would help clinicians stay up-to-date with the increasing amount of medical knowledge, enable predictive models, and keep up with demographic changes, making clinics technologically modern for younger physicians [<xref ref-type="bibr" rid="ref48">48</xref>].</p></sec><sec id="s3-5-3"><title>Clinical Impact</title><p>Thoughts on how the usage of AI would impact clinical work emerged in multiple studies [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. 
Some mentioned the benefits of diagnostic support, increasing clinical effectiveness and accuracy [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>], while others mentioned positive effects on their prescribing behavior by using a pharmaceutical decision support system [<xref ref-type="bibr" rid="ref49">49</xref>]. Using AI technology in remote diagnostics or examinations was considered beneficial [<xref ref-type="bibr" rid="ref53">53</xref>]. Clinicians also discussed retrieving a medical history using AI as a helpful tool [<xref ref-type="bibr" rid="ref45">45</xref>]. However, some clinicians thought that by removing the act of writing from the clinician, their thought process could be disrupted [<xref ref-type="bibr" rid="ref46">46</xref>]:</p><disp-quote><p>One of the advantages of when you write it is it reinforces what you thought &#x2026; It&#x2019;s a thinking process, because you actually think about what this actually means? &#x2026; How can you capture that writing experience in an electronic medium?</p><attrib>GP</attrib></disp-quote></sec><sec id="s3-5-4"><title>Desired Features</title><p>Clinicians had several ideas regarding features they would like to see in AI systems and decision support systems. Their wishes for specific features and the implications of these features were recurring topics [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Many expressed the importance of the extraction and summarization of essential data [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. 
They also expected AI systems to process more information than any human brain could, all while maintaining a high working speed [<xref ref-type="bibr" rid="ref48">48</xref>]. Clinicians wished for AI to be more accurate and yield better results than humans so that it would not be considered obsolete [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. The possibility for clinicians to customize the information presented or for the AI to adapt to the clinicians&#x2019; needs was deemed important [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>].</p><p>Other desired features were AI-assisted patient triage [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref52">52</xref>], identifying patients with high risk of disease [<xref ref-type="bibr" rid="ref52">52</xref>] and integrating AI into telehealth systems [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]. Besides purely clinical functions, suggestions included using AI to predict visit surges and for health resource planning [<xref ref-type="bibr" rid="ref52">52</xref>]. Numerous clinicians emphasized the necessity for AI systems to be user-friendly, thereby enhancing the probability of their adoption [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref55">55</xref>].</p><p>Some clinicians felt strongly that the AI should provide a clear, logical explanation of how it arrived at its conclusions, emphasizing the need for transparency and traceability of the AI&#x2019;s algorithm. 
Others, however, argued that as long as the AI&#x2019;s output is accurate and reliable, understanding its inner workings is less important [<xref ref-type="bibr" rid="ref50">50</xref>]. Some clinicians also expressed that their limited knowledge of AI systems hindered them from providing ideas on possible features [<xref ref-type="bibr" rid="ref51">51</xref>]:</p><disp-quote><p>And I do not know enough about artificial intelligence to give you big ideas of what could be done.</p><attrib>Health care provider</attrib></disp-quote></sec></sec><sec id="s3-6"><title>Theme 3: The Societal Impact of AI</title><sec id="s3-6-1"><title>Bias</title><p>Clinicians discussed several types of bias, some of which could affect the population and others which might affect clinicians. Opinions regarding bias and how it could affect our society were expressed [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]. Clinicians were worried that the data used to train an AI system could be historically biased by being trained mainly on information from only one or a few demographic groups [<xref ref-type="bibr" rid="ref46">46</xref>]. They also expressed that bias could &#x201C;leak&#x201D; into the AI from its creators [<xref ref-type="bibr" rid="ref50">50</xref>]:</p><disp-quote><p>The thing I&#x2019;m apprehensive about is, how are we teaching AI these things because some of those biases could leak in.</p><attrib>GP</attrib></disp-quote><p>Clinicians were additionally concerned that automation bias could affect the clinician&#x2019;s decision-making or potentially distract the clinician from important information. 
This means that clinicians could over-rely on the information presented by AI and prioritize it over their own reasoning [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>].</p></sec><sec id="s3-6-2"><title>Data Security, Privacy, and Legal Implications</title><p>Patient data security and privacy were topics discussed from different perspectives. Clinicians expressed concerns about the risks of having sensitive data processed through AI and the impact it could have both on patients and clinicians [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Many clinicians voiced concern over the security surrounding data processing, whether the data would be encrypted, and the risks of hacking or misuse of data [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref48">48</xref>]. On the other hand, some clinicians did not consider security important since they were unaware of any security issues [<xref ref-type="bibr" rid="ref47">47</xref>].</p><p>Other clinicians wanted to know how the AI system handled data privacy [<xref ref-type="bibr" rid="ref45">45</xref>]. Some clinicians thought that letting AI systems process all the data produced in a clinical setting could be an inherent problem, leading to possible monitoring of clinicians. However, none of the participants could identify who would benefit from such surveillance [<xref ref-type="bibr" rid="ref48">48</xref>]. Some clinicians took a negative stance on the complete transparency potentially caused by using AI in documentation, as opposed to the natural filtration of information employed by clinicians. 
They also preferred keeping data from a patient-physician consultation private [<xref ref-type="bibr" rid="ref48">48</xref>]:</p><disp-quote><p>Patient data are very sensitive data. Disease data are very sensitive data. [There is the risk that] they are passed on somewhere, that some authorities who have nothing to do with it or should have nothing to do with it could intercept the data and use this to the disadvantage of the patients.</p><attrib>GP</attrib></disp-quote><p>In addition to patient data safety, the safety of doctors and how the availability and transparency of data could work against them were considered. Clinicians suggested that doctor safety could be jeopardized when using AI for documentation or decision support. An example given by clinicians was if the AI had suggested something that a clinician did not take notice of or if there could be compromising data in what the AI system documented [<xref ref-type="bibr" rid="ref46">46</xref>].</p><p>Legal implications were also a large topic of discussion in several studies. Many clinicians voiced legal concerns, primarily related to fear of legal action taken toward the clinician if they acted outside of recommendations given by the AI system [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. 
Another viewpoint was that clinicians expected there to be built-in legal protection that shifted responsibility from the clinician to an AI system [<xref ref-type="bibr" rid="ref48">48</xref>], or for there to be a clearly defined medicolegal liability [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>].</p></sec></sec><sec id="s3-7"><title>Assessment of Confidence</title><p>The results of the GRADE-CERQual assessment for the review findings are summarized in <xref ref-type="table" rid="table3">Table 3</xref>.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Summary of qualitative findings, Grading of Recommendations Assessment, Development and Evaluation-Confidence in Evidence from Reviews of Qualitative Research (GRADE-CERQual) assessments.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Summary of review findings</td><td align="left" valign="bottom">References</td><td align="left" valign="bottom">CERQual<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> assessment of confidence in the evidence</td><td align="left" valign="bottom">Explanation of CERQual assessment</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4">Interaction with AI<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians perceive AI as an assistant that could alleviate the burden of specific tasks.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref51">51</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding adequacy.</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>AI may negatively impact the clinician-patient relationship due to a lack of human connection.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]</td><td align="left" valign="top">Low confidence</td><td align="left" valign="top">There were serious concerns regarding adequacy, moderate concerns regarding coherence, and minor concerns regarding methodology, justifying two levels of confidence downgrade.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>AI could enhance clinician empathy or facilitate clinician-patient communication and confidence.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">Moderate confidence</td><td align="left" valign="top">There were moderate concerns regarding coherence and minor concerns regarding adequacy, justifying one level of confidence downgrade.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians wish to retain control over AI systems and understand how they function.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were moderate concerns regarding adequacy, and minor concerns regarding methodology and 
coherence.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians expressed conflicting views regarding trust in AI.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were moderate concerns regarding adequacy. The concern was not deemed serious enough for a downgrade of confidence.</td></tr><tr><td align="left" valign="top" colspan="4">Resistance to AI</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Some clinicians fear being replaced or having their role diminished by AI.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Moderate confidence</td><td align="left" valign="top">There were moderate concerns regarding adequacy, and minor concerns regarding methodology and coherence, justifying one level of confidence downgrade.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>The introduction of AI could increase clinicians&#x2019; time expenditure or disrupt workflows.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">Moderate confidence</td><td align="left" valign="top">There were moderate concerns regarding coherence and minor concerns regarding methodology and adequacy, justifying one level of confidence downgrade.</td></tr><tr><td 
align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Multiple factors influence skepticism toward AI, such as previous experiences, time, age, interests, and technology acceptance.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr><tr><td align="left" valign="top" colspan="4">Workplace changes</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>AI systems could save clinicians time through automation.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">Moderate confidence</td><td align="left" valign="top">There were moderate concerns regarding coherence and minor concerns regarding methodology and adequacy, justifying one level of confidence downgrade.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians held differing perspectives on the importance of cost for AI system adoption.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]</td><td align="left" valign="top">Moderate confidence</td><td align="left" valign="top">There were moderate concerns regarding adequacy and minor concerns regarding methodology, justifying one level of confidence 
downgrade.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>There were conflicting views on the ideal level of AI system integration with existing clinical systems.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology.</td></tr><tr><td align="left" valign="top" colspan="4">Technological concerns</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinician concerns regarding technological issues such as AI system or user errors.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians expressed a need for specific training in AI systems and being informed about AI technology.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology, coherence, and adequacy.</td></tr><tr><td align="left" valign="top" colspan="4">Clinical impact</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>AI could provide valuable diagnostic support, increasing clinical effectiveness and accuracy.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr><tr><td align="left" valign="top" colspan="4">Desired features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians expressed preferences for specific features in AI systems.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians emphasized the importance of AI systems being adaptable and customizable.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>User-friendliness of AI systems was emphasized by numerous clinicians as essential for adoption.</td><td align="left" 
valign="top">[<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr><tr><td align="left" valign="top" colspan="4">Bias</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians are concerned that AI could perpetuate biases from its training data or its creators.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Clinicians fear over-relying on AI, leading to automation bias.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr><tr><td align="left" valign="top" colspan="4">Data security, privacy, and legal implications</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>The security of patient data processed by AI is a significant concern for clinicians.</td><td align="left" valign="top">[<xref ref-type="bibr" 
rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology, coherence, and adequacy.</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>There are significant concerns regarding the legal liability and responsibility when using AI in clinical decisions.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]</td><td align="left" valign="top">High confidence</td><td align="left" valign="top">There were minor concerns regarding methodology and adequacy.</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>CERQual: Confidence in Evidence from Reviews of Qualitative research.</p></fn><fn id="table3fn2"><p><sup>b</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap><p>Details are provided in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Results</title><p>When synthesizing primary care clinician perspectives of various AI systems, 3 analytical themes emerged. 
The GRADE-CERQual assessment indicated high confidence in 15 findings, moderate confidence in 5 findings, and low confidence in one finding.</p></sec><sec id="s4-2"><title>The Human-Machine Relationship</title><p>There were many positive remarks on the potential for AI to assist clinicians in administrative tasks [<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref51">51</xref>-<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>], clinical work [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>], and education [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. Some studies highlighted fears of AI replacing human roles [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Resistance to AI was noted, with concerns that AI might disrupt workflow and increase task time [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. The underlying causes of skepticism were discussed. Some attributed it to previous negative experiences with other digital tools [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref52">52</xref>], others to their age or technical alignment [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. 
Positive views on AI systems were generally seen in studies where AI tools were deemed effective, seamlessly integrated, and saved time [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>].</p></sec><sec id="s4-3"><title>The Technologically Enhanced Clinic</title><p>Some clinicians saw benefits in automating tasks like documentation and consultation [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref48">48</xref>], whereas others feared potentially increased workload [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. Technical concerns were found, including fears of computer errors or user errors [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Clinicians valued AI&#x2019;s potential to assist in clinical tasks such as decision support [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref53">53</xref>-<xref ref-type="bibr" rid="ref55">55</xref>]. Clinicians discussed desirable AI features, such as diagnostic support, integration with telemedicine, and customization options [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. 
Several studies mentioned the importance of the system&#x2019;s user-friendliness [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref55">55</xref>].</p></sec><sec id="s4-4"><title>The Societal Impact of AI</title><p>Security and privacy issues were highlighted, particularly regarding the handling of sensitive patient data and the risks of unauthorized access [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>-<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. Societal impact, including potential biases and overreliance on AI, was a concern [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>], as were potential legal implications if clinicians acted outside of the AI recommendations [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. Clinicians expressed greater confidence in adopting AI systems that had received formal regulatory approval or institutional endorsement [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>].</p></sec><sec id="s4-5"><title>Comparison With Prior Work</title><p>Our findings regarding clinicians&#x2019; views of the potential of AI and reservations regarding safety aspects are similar to a previous systematic review of AI-powered chatbots for managing chronic illness, which provided insights into the usability and acceptance of AI in health care. The review found that participants gave positive feedback regarding perceived usefulness, satisfaction, and ease of use. 
The review also concluded that the safety of AI-powered chatbots has been overlooked and needs to be considered more thoroughly in future designs [<xref ref-type="bibr" rid="ref56">56</xref>].</p><p>As AI expands into health care, a significant concern has emerged: the risk of bias. Since AI relies on historical data that could be statistically or socially biased, it could potentially incur a risk of worsening patient outcomes [<xref ref-type="bibr" rid="ref57">57</xref>]. This coincides with our findings regarding clinicians&#x2019; concerns about biased AI systems.</p><p>In this review, clinicians were positive toward simplifying certain tasks using AI while simultaneously having concerns regarding technical aspects. Another systematic review of stakeholders&#x2019; perspectives on clinical AI implementation, which included perspectives of health care providers, similarly found that health care providers saw benefits in using AI for reducing repetitive tasks, improving patient outcomes, and clinical training. Reservations toward AI included implementation issues, uncertainty around its mechanics, and skepticism toward its ability to inform clinical decisions [<xref ref-type="bibr" rid="ref58">58</xref>].</p><p>In the current review, opinions on whether clinicians should be involved in the development of AI systems were mixed. One scoping review published in 2020 found the engagement of clinicians in health care AI development and research to be crucial [<xref ref-type="bibr" rid="ref59">59</xref>]. The ethical and regulatory challenges expressed in the findings of this review are also brought to light in a scoping review published in 2022. Their findings suggest that AI research and development in health care is currently outpacing the creation of supporting AI governance, and there is a need for international collaboration to facilitate comprehensive AI governance in this sector [<xref ref-type="bibr" rid="ref60">60</xref>]. 
There were similar findings in another article published in 2021, where the author concludes that there is an apparent risk of regulations and oversight falling behind AI&#x2019;s rapid development and integration [<xref ref-type="bibr" rid="ref61">61</xref>].</p><p>This review focuses solely on clinicians, although many other professionals are implicated in the adoption of AI into health care. Further investigation of perspectives of information technology experts, managers, and other stakeholders could prove valuable in the development, adoption, and integration of AI systems [<xref ref-type="bibr" rid="ref62">62</xref>].</p></sec><sec id="s4-6"><title>Strengths and Limitations</title><sec id="s4-6-1"><title>Strengths</title><p>To the best of the authors&#x2019; knowledge, no systematic review on this topic in a primary health care setting is currently available. The review provides new and valuable information on the topic. The review adhered to the PRISMA and ENTREQ guidelines, was pre-registered with PROSPERO, and searched across several large databases. Thematic synthesis was employed by two independent authors, enhancing reliability and validity, and reducing bias.</p></sec><sec id="s4-6-2"><title>Limitations</title><p>The field of AI is rapidly expanding, and perspectives on AI in primary health care could swiftly change over time. Several new studies have emerged since this analysis was conducted; thus, further research is needed to better understand clinician perspectives on the latest AI advancements. This review relied on the authors&#x2019; definitions of AI in the included articles, potentially increasing the variability of the results. The limited geographic range of the included studies may affect the generalizability of the findings. In this review, we only included studies from high-income countries. 
Research on AI in health care in low- and middle-income countries is very limited, as most AI health systems are developed and tested in high-income countries. Additionally, no PRESS checklist for peer review of search strings was completed.</p></sec></sec><sec id="s4-7"><title>Practical Implications and Identified Gaps in the Qualitative Literature</title><p>The findings of this review suggest that AI systems should focus on reducing administrative burden and supporting certain clinical tasks, provided they do not disrupt clinicians&#x2019; workflows. These systems must demonstrate time-saving capabilities and seamlessly integrate with existing infrastructure, such as electronic medical records. Through these functions, an AI system could enhance a medical visit by allowing the clinician to focus on patient contact rather than administrative tasks.</p><p>Continuous monitoring for computer errors, structured AI training programs for clinicians, and simplifying user interfaces are essential to minimize user errors. Additionally, ensuring robust data handling practices is critical to maintaining patient privacy and security. There is also a desire among clinicians for clearly defined medicolegal responsibilities.</p><p>Developers of health care-related AI systems should aim to mitigate system bias and consider collaborating with clinicians in the development process to build initial trust and address potential concerns. Involving clinicians with previous experiences of AI or CDSS in the development or integration of AI systems might facilitate adoption and use. Moreover, current AI tools should complement, not replace, clinical decision-making. It is important to provide younger clinicians with opportunities to develop critical reasoning skills without fostering over-reliance on AI-generated outputs.</p><p>The review revealed gaps regarding clinician perspectives on AI in primary health care, specifically in LLMs. 
Perspectives on ethical implications focused mainly on bias in AI systems, patient privacy and data security, medicolegal implications, transparency and accountability, and equity in AI system access. However, deeper analyses regarding the ethical implications of modern AI systems, including how AI might alter clinicians&#x2019; professional roles, authority, and autonomy, were scarce. Some of the included studies mentioned clinicians expressing thoughts on their reliance on AI systems and how their autonomy might be affected. However, further research could provide valuable information on these aspects that directly influence clinicians&#x2019; acceptance and utilization of AI technologies.</p><p>Findings from this review suggest that some clinicians are aware of long-term job implications and possible job displacement due to the introduction of AI into health care. Threats to the professional autonomy of clinicians could be due to automation bias, potentially overriding or deskilling clinical judgment with decision-support recommendations becoming default options, or by reducing the professional freedom of clinicians when many clinical actions are turned into data, enabling scrutiny of even the smallest decisions. Further research on AI&#x2019;s effects on the evolution of clinicians&#x2019; career paths and future autonomy is warranted.</p><p>While many clinicians assume ultimate responsibility in patient care, several fear &#x201C;legal whiplash&#x201D; if they disregard an AI recommendation that later proved correct or followed one that proved harmful. These findings highlight the need for clear governance frameworks by having AI tools accredited and liability boundaries specified by a professional body such as the EU AI Act [<xref ref-type="bibr" rid="ref63">63</xref>] and FDA Software-as-a-Medical-Device (SaMD) guidance [<xref ref-type="bibr" rid="ref64">64</xref>]. 
Further empirical research is recommended to evaluate how such regulations translate into everyday primary care. Another possible pre-condition for ethically acceptable AI use could be provided by transparent AI reasoning paths, enabling clinicians to follow a defensible audit trail rather than a simple, final output that could prevent shared decision-making with patients.</p></sec><sec id="s4-8"><title>Future Directions</title><p>Further research on AI in primary health care is needed, especially in low- and middle-income countries. Notable gaps in the literature include evaluations of LLMs in primary health care, which are expected to have great potential. Ethnographic studies could yield deeper insights into AI&#x2019;s impact on the professional role of clinicians and long-term career implications. Further in-depth, qualitative research on these topics could prove helpful for future AI system development and integration.</p><p>Future research should also expand beyond cross-sectional studies to longitudinal, mixed-methods studies that follow AI systems from adoption to routine use in primary care clinics for further in-depth analysis of AI use and a deeper understanding of facilitators and barriers to adoption. Additionally, systematic reviews targeting specific types of AI or clinical use cases would support a more nuanced understanding of AI implementation in diverse primary care contexts.</p></sec><sec id="s4-9"><title>Conclusions</title><p>Clinicians view AI as a technology that could both enhance and complicate primary health care. While AI can provide substantial support, its integration into health care requires careful consideration of ethical implications, technical reliability, and the maintenance of human oversight. Interpretation is constrained by heterogeneity in qualitative methods and the diverse AI technologies examined across studies. 
More in-depth qualitative research on the effects of AI on clinicians&#x2019; careers and autonomy could prove helpful for the future development of AI systems.</p></sec></sec></body><back><ack><p>Generative AI tools (ChatGPT-4o and o3; OpenAI, 2024&#x2013;2025 releases) were used solely to assist with language editing and phrasing improvements. The AI tools had no role in data analysis or interpretation.</p></ack><notes><sec><title>Funding</title><p>This study has been funded by Region V&#x00E4;stra G&#x00F6;taland (reference: RHS 2024-01018) and The Swedish Research Council (reference: 2023-05744).</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: AE, RB, AM (Alisa Mundzic), DS, RG</p><p>Methodology: AE, RB, AM (Alisa Mundzic), DS, RG</p><p>Investigation: RB, AM (Alisa Mundzic)</p><p>Data Curation: RB, AM (Alisa Mundzic)</p><p>Writing - original draft: RB</p><p>Writing - review and editing: all authors</p><p>Supervision: AE</p><p>Funding Acquisition: AE</p><p>Project administration: AE</p><p>Visualization: RB</p></fn><fn fn-type="conflict"><p>AE has been employed by Tandem Health AB since September 1, 2024, developing AI products for health care providers. 
Tandem Health AB had no role in Conceptualization, Methodology, Investigation, Data Curation, Writing, Supervision, Funding Acquisition, Project Administration, or Visualization.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CASP</term><def><p>critical appraisal skills program</p></def></def-item><def-item><term id="abb3">CDSS</term><def><p>clinical decision support system</p></def></def-item><def-item><term id="abb4">ENTREQ</term><def><p>Enhanced Transparency in Reporting the Synthesis of Qualitative Research</p></def></def-item><def-item><term id="abb5">GAI</term><def><p>generative artificial intelligence</p></def></def-item><def-item><term id="abb6">GP</term><def><p>general practitioner</p></def></def-item><def-item><term id="abb7">GRADE-CERQual</term><def><p>Grading of Recommendations Assessment, Development, and Evaluation&#x2013;Confidence in the Evidence From Reviews of Qualitative Research</p></def></def-item><def-item><term id="abb8">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb9">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb10">NLP</term><def><p>natural language processing</p></def></def-item><def-item><term id="abb11">PRISMA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p></def></def-item><def-item><term id="abb12">PRISMA-S</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses Search</p></def></def-item><def-item><term id="abb13">SPIDER</term><def><p>Sample, Phenomenon of Interest, Design, Evaluation, and Research</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lou</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>R</given-names> </name><name name-style="western"><surname>Deng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Z</given-names> </name></person-group><article-title>What is the impact of integrated care on the job satisfaction of primary healthcare providers: a systematic review</article-title><source>Hum Resour Health</source><year>2023</year><month>11</month><day>1</day><volume>21</volume><issue>1</issue><fpage>86</fpage><pub-id pub-id-type="doi">10.1186/s12960-023-00874-w</pub-id><pub-id pub-id-type="medline">37915032</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gomez-Cabello</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Borna</surname><given-names>S</given-names> </name><name name-style="western"><surname>Pressman</surname><given-names>S</given-names> </name><name name-style="western"><surname>Haider</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Haider</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Forte</surname><given-names>AJ</given-names> </name></person-group><article-title>Artificial-intelligence-based clinical decision support systems in primary care: a scoping review of current clinical implementations</article-title><source>Eur J Investig Health Psychol Educ</source><year>2024</year><month>03</month><day>13</day><volume>14</volume><issue>3</issue><fpage>685</fpage><lpage>698</lpage><pub-id pub-id-type="doi">10.3390/ejihpe14030045</pub-id><pub-id pub-id-type="medline">38534906</pub-id></nlm-citation></ref><ref 
id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Naehrig</surname><given-names>D</given-names> </name><name name-style="western"><surname>Schokman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hughes</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Epstein</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hickie</surname><given-names>IB</given-names> </name><name name-style="western"><surname>Glozier</surname><given-names>N</given-names> </name></person-group><article-title>Effect of interventions for the well-being, satisfaction and flourishing of general practitioners-a systematic review</article-title><source>BMJ Open</source><year>2021</year><month>08</month><day>18</day><volume>11</volume><issue>8</issue><fpage>e046599</fpage><pub-id pub-id-type="doi">10.1136/bmjopen-2020-046599</pub-id><pub-id pub-id-type="medline">34408036</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wan</surname><given-names>P</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Tang</surname><given-names>W</given-names> </name><etal/></person-group><article-title>Outpatient reception via collaboration between nurses and a large language model: a randomized controlled trial</article-title><source>Nat Med</source><year>2024</year><month>10</month><volume>30</volume><issue>10</issue><fpage>2878</fpage><lpage>2885</lpage><pub-id pub-id-type="doi">10.1038/s41591-024-03148-7</pub-id><pub-id pub-id-type="medline">39009780</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Guan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Integrated image-based deep learning and language models for primary diabetes care</article-title><source>Nat Med</source><year>2024</year><month>10</month><volume>30</volume><issue>10</issue><fpage>2886</fpage><lpage>2896</lpage><pub-id pub-id-type="doi">10.1038/s41591-024-03139-8</pub-id><pub-id pub-id-type="medline">39030266</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>Y</given-names> </name></person-group><article-title>Artificial intelligence: a survey on evolution, models, applications and future trends</article-title><source>J Manag Anal</source><year>2019</year><month>01</month><day>2</day><volume>6</volume><issue>1</issue><fpage>1</fpage><lpage>29</lpage><pub-id pub-id-type="doi">10.1080/23270012.2019.1570365</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chew</surname><given-names>HSJ</given-names> </name><name name-style="western"><surname>Achananuparp</surname><given-names>P</given-names> </name></person-group><article-title>Perceptions and needs of artificial intelligence in health care to increase adoption: scoping review</article-title><source>J Med Internet Res</source><year>2022</year><month>01</month><day>14</day><volume>24</volume><issue>1</issue><fpage>e32939</fpage><pub-id pub-id-type="doi">10.2196/32939</pub-id><pub-id pub-id-type="medline">35029538</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>P&#x0103;v&#x0103;loaia</surname><given-names>VD</given-names> </name><name name-style="western"><surname>Necula</surname><given-names>SC</given-names> </name></person-group><article-title>Artificial intelligence as a disruptive technology&#x2014;a systematic literature review</article-title><source>Electronics</source><year>2023</year><volume>12</volume><issue>5</issue><fpage>1102</fpage><pub-id pub-id-type="doi">10.3390/electronics12051102</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kelly</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kaye</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Oviedo-Trespalacios</surname><given-names>O</given-names> </name></person-group><article-title>What factors contribute to the acceptance of artificial intelligence? 
A systematic review</article-title><source>Telematics and Informatics</source><year>2023</year><month>02</month><volume>77</volume><fpage>101925</fpage><pub-id pub-id-type="doi">10.1016/j.tele.2022.101925</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guo</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Hao</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gong</surname><given-names>J</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>F</given-names> </name></person-group><article-title>Artificial intelligence in health care: bibliometric analysis</article-title><source>J Med Internet Res</source><year>2020</year><month>07</month><day>29</day><volume>22</volume><issue>7</issue><fpage>e18228</fpage><pub-id pub-id-type="doi">10.2196/18228</pub-id><pub-id pub-id-type="medline">32723713</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Friedman</surname><given-names>C</given-names> </name><name name-style="western"><surname>Elhadad</surname><given-names>N</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Shortliffe</surname><given-names>EH</given-names> </name><name name-style="western"><surname>Cimino</surname><given-names>JJ</given-names> </name></person-group><article-title>Natural language processing in health care and biomedicine</article-title><source>Biomedical Informatics: Computer Applications in Health Care and Biomedicine</source><year>2014</year><publisher-name>Springer London</publisher-name><fpage>255</fpage><lpage>284</lpage><pub-id 
pub-id-type="doi">10.1007/978-1-4471-4474-8_8</pub-id><pub-id pub-id-type="other">978-1-4471-4474-8</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Collier</surname><given-names>R</given-names> </name></person-group><article-title>Electronic health records contributing to physician burnout</article-title><source>CMAJ</source><year>2017</year><month>11</month><day>13</day><volume>189</volume><issue>45</issue><fpage>E1405</fpage><lpage>E1406</lpage><pub-id pub-id-type="doi">10.1503/cmaj.109-5522</pub-id><pub-id pub-id-type="medline">29133547</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Sandeep</surname><given-names>R</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Thomas</surname><given-names>FH</given-names> </name></person-group><article-title>Use of artificial intelligence in healthcare delivery</article-title><source>eHealth - Making Health Care Smarter</source><year>2018</year><publisher-name>IntechOpen</publisher-name><pub-id pub-id-type="doi">10.5772/intechopen.74714</pub-id><pub-id pub-id-type="other">978-1-78923-523-4</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Clusmann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kolbinger</surname><given-names>FR</given-names> </name><name name-style="western"><surname>Muti</surname><given-names>HS</given-names> </name><etal/></person-group><article-title>The future landscape of large language models in medicine</article-title><source>Commun Med 
(Lond)</source><year>2023</year><month>10</month><day>10</day><volume>3</volume><issue>1</issue><fpage>141</fpage><pub-id pub-id-type="doi">10.1038/s43856-023-00370-1</pub-id><pub-id pub-id-type="medline">37816837</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kung</surname><given-names>TH</given-names> </name><name name-style="western"><surname>Cheatham</surname><given-names>M</given-names> </name><name name-style="western"><surname>Medenilla</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Performance of ChatGPT on USMLE: potential for AI-assisted medical education using large language models</article-title><source>PLOS Digit Health</source><year>2023</year><month>02</month><volume>2</volume><issue>2</issue><fpage>e0000198</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000198</pub-id><pub-id pub-id-type="medline">36812645</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gilson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Safranek</surname><given-names>CW</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>T</given-names> </name><etal/></person-group><article-title>How does ChatGPT perform on the United States Medical Licensing Examination (USMLE)? 
The implications of large language models for medical education and knowledge assessment</article-title><source>JMIR Med Educ</source><year>2023</year><month>02</month><day>8</day><volume>9</volume><fpage>e45312</fpage><pub-id pub-id-type="doi">10.2196/45312</pub-id><pub-id pub-id-type="medline">36753318</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ayers</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Poliak</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dredze</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title><source>JAMA Intern Med</source><year>2023</year><month>06</month><day>1</day><volume>183</volume><issue>6</issue><fpage>589</fpage><lpage>596</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id><pub-id pub-id-type="medline">37115527</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajkomar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dean</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kohane</surname><given-names>I</given-names> </name></person-group><article-title>Machine learning in medicine</article-title><source>N Engl J Med</source><year>2019</year><month>04</month><day>4</day><volume>380</volume><issue>14</issue><fpage>1347</fpage><lpage>1358</lpage><pub-id pub-id-type="doi">10.1056/NEJMra1814259</pub-id><pub-id pub-id-type="medline">30943338</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Lyell</surname><given-names>D</given-names> </name><name name-style="western"><surname>Coiera</surname><given-names>E</given-names> </name></person-group><article-title>Automation bias and verification complexity: a systematic review</article-title><source>J Am Med Inform Assoc</source><year>2017</year><month>03</month><day>1</day><volume>24</volume><issue>2</issue><fpage>423</fpage><lpage>431</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocw105</pub-id><pub-id pub-id-type="medline">27516495</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="report"><article-title>Explanatory memorandum on the updated OECD definition of an AI system</article-title><year>2024</year><access-date>2026-01-22</access-date><publisher-name>OECD - Organisation for Economic Co-operation and Development</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.oecd.org/content/dam/oecd/en/publications/reports/2024/03/explanatory-memorandum-on-the-updated-oecd-definition-of-an-ai-system_3c815e51/623da898-en.pdf">https://www.oecd.org/content/dam/oecd/en/publications/reports/2024/03/explanatory-memorandum-on-the-updated-oecd-definition-of-an-ai-system_3c815e51/623da898-en.pdf</ext-link></comment></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Freyer</surname><given-names>O</given-names> </name><name name-style="western"><surname>Wiest</surname><given-names>IC</given-names> </name><name name-style="western"><surname>Kather</surname><given-names>JN</given-names> </name><name name-style="western"><surname>Gilbert</surname><given-names>S</given-names> </name></person-group><article-title>A future role for health applications of large language models depends on regulators enforcing safety standards</article-title><source>Lancet Digit 
Health</source><year>2024</year><month>09</month><volume>6</volume><issue>9</issue><fpage>e662</fpage><lpage>e672</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(24)00124-9</pub-id><pub-id pub-id-type="medline">39179311</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Feuerriegel</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hartmann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Janiesch</surname><given-names>C</given-names> </name><name name-style="western"><surname>Zschech</surname><given-names>P</given-names> </name></person-group><article-title>Generative AI</article-title><source>Bus Inf Syst Eng</source><year>2024</year><month>02</month><volume>66</volume><issue>1</issue><fpage>111</fpage><lpage>126</lpage><pub-id pub-id-type="doi">10.1007/s12599-023-00834-7</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Chowdhary</surname><given-names>KR</given-names> </name></person-group><article-title>Natural language processing</article-title><source>Fundamentals of Artificial Intelligence</source><year>2020</year><publisher-name>Springer India</publisher-name><fpage>603</fpage><lpage>649</lpage><pub-id pub-id-type="doi">10.1007/978-81-322-3972-7_19</pub-id><pub-id pub-id-type="other">978-81-322-3970-3</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garg</surname><given-names>AX</given-names> </name><name name-style="western"><surname>Adhikari</surname><given-names>NKJ</given-names> </name><name name-style="western"><surname>McDonald</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Effects 
of computerized clinical decision support systems on practitioner performance and patient outcomes: a systematic review</article-title><source>JAMA</source><year>2005</year><month>03</month><day>9</day><volume>293</volume><issue>10</issue><fpage>1223</fpage><lpage>1238</lpage><pub-id pub-id-type="doi">10.1001/jama.293.10.1223</pub-id><pub-id pub-id-type="medline">15755945</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rathore</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Nikita</surname><given-names>S</given-names> </name><name name-style="western"><surname>Thakur</surname><given-names>G</given-names> </name><name name-style="western"><surname>Mishra</surname><given-names>S</given-names> </name></person-group><article-title>Artificial intelligence and machine learning applications in biopharmaceutical manufacturing</article-title><source>Trends Biotechnol</source><year>2023</year><month>04</month><volume>41</volume><issue>4</issue><fpage>497</fpage><lpage>510</lpage><pub-id pub-id-type="doi">10.1016/j.tibtech.2022.08.007</pub-id><pub-id pub-id-type="medline">36117026</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Noyes</surname><given-names>J</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cargo</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Cochrane qualitative and implementation methods group guidance series-paper 1: introduction</article-title><source>J Clin Epidemiol</source><year>2018</year><month>05</month><volume>97</volume><fpage>35</fpage><lpage>38</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2017.09.025</pub-id><pub-id 
pub-id-type="medline">29242094</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tong</surname><given-names>A</given-names> </name><name name-style="western"><surname>Flemming</surname><given-names>K</given-names> </name><name name-style="western"><surname>McInnes</surname><given-names>E</given-names> </name><name name-style="western"><surname>Oliver</surname><given-names>S</given-names> </name><name name-style="western"><surname>Craig</surname><given-names>J</given-names> </name></person-group><article-title>Enhancing transparency in reporting the synthesis of qualitative research: ENTREQ</article-title><source>BMC Med Res Methodol</source><year>2012</year><month>11</month><day>27</day><volume>12</volume><issue>1</issue><fpage>181</fpage><pub-id pub-id-type="doi">10.1186/1471-2288-12-181</pub-id><pub-id pub-id-type="medline">23185978</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Page</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>McKenzie</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><etal/></person-group><article-title>The PRISMA 2020 statement: an updated guideline for reporting systematic reviews</article-title><source>BMJ</source><year>2021</year><month>03</month><day>29</day><volume>372</volume><fpage>n71</fpage><pub-id pub-id-type="doi">10.1136/bmj.n71</pub-id><pub-id pub-id-type="medline">33782057</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Entezarjou</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Mundzic</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bogdanffy</surname><given-names>R</given-names> </name><collab>NIHR - National Institute for Health and Care Research</collab></person-group><article-title>Exploring patient and clinician perspectives on artificial intelligence in primary care - a qualitative systematic review</article-title><source>PROSPERO: International prospective register of systematic reviews</source><year>2024</year><access-date>2026-01-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.crd.york.ac.uk/prospero/display_record.php?ID=CRD42024505209">https://www.crd.york.ac.uk/prospero/display_record.php?ID=CRD42024505209</ext-link></comment></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cooke</surname><given-names>A</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>D</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name></person-group><article-title>Beyond PICO: the SPIDER tool for qualitative evidence synthesis</article-title><source>Qual Health Res</source><year>2012</year><month>10</month><volume>22</volume><issue>10</issue><fpage>1435</fpage><lpage>1443</lpage><pub-id pub-id-type="doi">10.1177/1049732312452938</pub-id><pub-id pub-id-type="medline">22829486</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rethlefsen</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Kirtley</surname><given-names>S</given-names> </name><name name-style="western"><surname>Waffenschmidt</surname><given-names>S</given-names> </name><etal/></person-group><article-title>PRISMA-S: an extension to the PRISMA 
statement for reporting literature searches in systematic reviews</article-title><source>Syst Rev</source><year>2021</year><month>01</month><day>26</day><volume>10</volume><issue>1</issue><fpage>39</fpage><pub-id pub-id-type="doi">10.1186/s13643-020-01542-z</pub-id><pub-id pub-id-type="medline">33499930</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ouzzani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hammady</surname><given-names>H</given-names> </name><name name-style="western"><surname>Fedorowicz</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Elmagarmid</surname><given-names>A</given-names> </name></person-group><article-title>Rayyan-a web and mobile app for systematic reviews</article-title><source>Syst Rev</source><year>2016</year><month>12</month><day>5</day><volume>5</volume><issue>1</issue><fpage>210</fpage><pub-id pub-id-type="doi">10.1186/s13643-016-0384-4</pub-id><pub-id pub-id-type="medline">27919275</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="web"><article-title>CASP checklist: CASP qualitative studies checklist</article-title><source>CASP - Critical Appraisal Skills Programme</source><year>2023</year><access-date>2026-01-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://casp-uk.net/checklists/casp-qualitative-studies-checklist-fillable.pdf">https://casp-uk.net/checklists/casp-qualitative-studies-checklist-fillable.pdf</ext-link></comment></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>Using thematic 
analysis in psychology</article-title><source>Qual Res Psychol</source><year>2006</year><month>01</month><volume>3</volume><issue>2</issue><fpage>77</fpage><lpage>101</lpage><pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dhakal</surname><given-names>K</given-names> </name></person-group><article-title>NVivo</article-title><source>J Med Libr Assoc</source><year>2022</year><month>04</month><day>1</day><volume>110</volume><issue>2</issue><fpage>270</fpage><lpage>272</lpage><pub-id pub-id-type="doi">10.5195/jmla.2022.1271</pub-id><pub-id pub-id-type="medline">35440911</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thomas</surname><given-names>J</given-names> </name><name name-style="western"><surname>Harden</surname><given-names>A</given-names> </name></person-group><article-title>Methods for the thematic synthesis of qualitative research in systematic reviews</article-title><source>BMC Med Res Methodol</source><year>2008</year><month>07</month><day>10</day><volume>8</volume><issue>1</issue><fpage>45</fpage><pub-id pub-id-type="doi">10.1186/1471-2288-8-45</pub-id><pub-id pub-id-type="medline">18616818</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lewin</surname><given-names>S</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name><name name-style="western"><surname>Glenton</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Applying GRADE-CERQual to qualitative evidence synthesis findings: introduction to the series</article-title><source>Implement 
Sci</source><year>2018</year><month>01</month><day>25</day><volume>13</volume><issue>Suppl 1</issue><fpage>2</fpage><pub-id pub-id-type="doi">10.1186/s13012-017-0688-3</pub-id><pub-id pub-id-type="medline">29384079</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lewin</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bohren</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rashidian</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Applying GRADE-CERQual to qualitative evidence synthesis findings-paper 2: how to make an overall CERQual assessment of confidence and create a Summary of Qualitative Findings table</article-title><source>Implement Sci</source><year>2018</year><month>01</month><day>25</day><volume>13</volume><issue>Suppl 1</issue><fpage>10</fpage><pub-id pub-id-type="doi">10.1186/s13012-017-0689-2</pub-id><pub-id pub-id-type="medline">29384082</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Munthe-Kaas</surname><given-names>H</given-names> </name><name name-style="western"><surname>Bohren</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Glenton</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Applying GRADE-CERQual to qualitative evidence synthesis findings-paper 3: how to assess methodological limitations</article-title><source>Implement Sci</source><year>2018</year><month>01</month><day>25</day><volume>13</volume><issue>Suppl 1</issue><fpage>9</fpage><pub-id pub-id-type="doi">10.1186/s13012-017-0690-9</pub-id><pub-id pub-id-type="medline">29384078</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Colvin</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Garside</surname><given-names>R</given-names> </name><name name-style="western"><surname>Wainwright</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Applying GRADE-CERQual to qualitative evidence synthesis findings-paper 4: how to assess coherence</article-title><source>Implement Sci</source><year>2018</year><month>01</month><day>25</day><volume>13</volume><issue>Suppl 1</issue><fpage>13</fpage><pub-id pub-id-type="doi">10.1186/s13012-017-0691-8</pub-id><pub-id pub-id-type="medline">29384081</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Glenton</surname><given-names>C</given-names> </name><name name-style="western"><surname>Carlsen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lewin</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Applying GRADE-CERQual to qualitative evidence synthesis findings-paper 5: how to assess adequacy of data</article-title><source>Implement Sci</source><year>2018</year><month>01</month><day>25</day><volume>13</volume><issue>Suppl 1</issue><fpage>14</fpage><pub-id pub-id-type="doi">10.1186/s13012-017-0692-7</pub-id><pub-id pub-id-type="medline">29384077</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Noyes</surname><given-names>J</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lewin</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Applying GRADE-CERQual to qualitative 
evidence synthesis findings-paper 6: how to assess relevance of the data</article-title><source>Implement Sci</source><year>2018</year><month>01</month><day>25</day><volume>13</volume><issue>Suppl 1</issue><fpage>4</fpage><pub-id pub-id-type="doi">10.1186/s13012-017-0693-6</pub-id><pub-id pub-id-type="medline">29384080</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>M</given-names> </name><name name-style="western"><surname>Dysart</surname><given-names>GC</given-names> </name><name name-style="western"><surname>Doupnik</surname><given-names>SK</given-names> </name><etal/></person-group><article-title>Adolescent, parent, and provider perceptions of a predictive algorithm to identify adolescent suicide risk in primary care</article-title><source>Acad Pediatr</source><year>2024</year><volume>24</volume><issue>4</issue><fpage>645</fpage><lpage>653</lpage><pub-id pub-id-type="doi">10.1016/j.acap.2023.12.015</pub-id><pub-id pub-id-type="medline">38190885</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Litvin</surname><given-names>CB</given-names> </name><name name-style="western"><surname>Ornstein</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Wessell</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Nemeth</surname><given-names>LS</given-names> </name><name name-style="western"><surname>Nietert</surname><given-names>PJ</given-names> </name></person-group><article-title>Adoption of a clinical decision support system to promote judicious use of antibiotics for acute respiratory infections in primary care</article-title><source>Int J Med 
Inform</source><year>2012</year><month>08</month><volume>81</volume><issue>8</issue><fpage>521</fpage><lpage>526</lpage><pub-id pub-id-type="doi">10.1016/j.ijmedinf.2012.03.002</pub-id><pub-id pub-id-type="medline">22483528</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fraile Navarro</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kocaballi</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Dras</surname><given-names>M</given-names> </name><name name-style="western"><surname>Berkovsky</surname><given-names>S</given-names> </name></person-group><article-title>Collaboration, not confrontation: understanding general practitioners&#x2019; attitudes towards natural language and text automation in clinical practice</article-title><source>ACM Trans Comput Hum Interact</source><year>2023</year><month>04</month><day>30</day><volume>30</volume><issue>2</issue><fpage>1</fpage><lpage>34</lpage><pub-id pub-id-type="doi">10.1145/3569893</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kocaballi</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Ijaz</surname><given-names>K</given-names> </name><name name-style="western"><surname>Laranjo</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Envisioning an artificial intelligence documentation assistant for future primary care consultations: a co-design study with general practitioners</article-title><source>J Am Med Inform Assoc</source><year>2020</year><month>11</month><day>1</day><volume>27</volume><issue>11</issue><fpage>1695</fpage><lpage>1704</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocaa131</pub-id><pub-id 
pub-id-type="medline">32845984</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shibl</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lawley</surname><given-names>M</given-names> </name><name name-style="western"><surname>Debuse</surname><given-names>J</given-names> </name></person-group><article-title>Factors influencing decision support system acceptance</article-title><source>Decis Support Syst</source><year>2013</year><month>01</month><volume>54</volume><issue>2</issue><fpage>953</fpage><lpage>961</lpage><pub-id pub-id-type="doi">10.1016/j.dss.2012.09.018</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Buck</surname><given-names>C</given-names> </name><name name-style="western"><surname>Doctor</surname><given-names>E</given-names> </name><name name-style="western"><surname>Hennrich</surname><given-names>J</given-names> </name><name name-style="western"><surname>J&#x00F6;hnk</surname><given-names>J</given-names> </name><name name-style="western"><surname>Eymann</surname><given-names>T</given-names> </name></person-group><article-title>General practitioners&#x2019; attitudes toward artificial intelligence-enabled systems: interview study</article-title><source>J Med Internet Res</source><year>2022</year><month>01</month><day>27</day><volume>24</volume><issue>1</issue><fpage>e28916</fpage><pub-id pub-id-type="doi">10.2196/28916</pub-id><pub-id pub-id-type="medline">35084342</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahearn</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Kerr</surname><given-names>SJ</given-names> 
</name></person-group><article-title>General practitioners&#x2019; perceptions of the pharmaceutical decision-support tools in their prescribing software</article-title><source>Med J Aust</source><year>2003</year><month>07</month><day>7</day><volume>179</volume><issue>1</issue><fpage>34</fpage><lpage>37</lpage><pub-id pub-id-type="doi">10.5694/j.1326-5377.2003.tb05415.x</pub-id><pub-id pub-id-type="medline">12831382</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Allen</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Webb</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mandvi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Frieden</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tai-Seale</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kallenberg</surname><given-names>G</given-names> </name></person-group><article-title>Navigating the doctor-patient-AI relationship - a mixed-methods study of physician attitudes toward artificial intelligence in primary care</article-title><source>BMC Prim Care</source><year>2024</year><month>01</month><day>27</day><volume>25</volume><issue>1</issue><fpage>42</fpage><pub-id pub-id-type="doi">10.1186/s12875-024-02282-y</pub-id><pub-id pub-id-type="medline">38281026</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nash</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Thorpe</surname><given-names>C</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>JB</given-names> </name><etal/></person-group><article-title>Perceptions of artificial intelligence use in 
primary care: a qualitative study with providers and staff of Ontario community health centres</article-title><source>J Am Board Fam Med</source><year>2023</year><month>04</month><day>3</day><volume>36</volume><issue>2</issue><fpage>221</fpage><lpage>228</lpage><pub-id pub-id-type="doi">10.3122/jabfm.2022.220177R2</pub-id><pub-id pub-id-type="medline">36948536</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Upshaw</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Craig-Neil</surname><given-names>A</given-names> </name><name name-style="western"><surname>Macklin</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Priorities for artificial intelligence applications in primary care: a Canadian deliberative dialogue with patients, providers, and health system leaders</article-title><source>J Am Board Fam Med</source><year>2023</year><month>04</month><day>3</day><volume>36</volume><issue>2</issue><fpage>210</fpage><lpage>220</lpage><pub-id pub-id-type="doi">10.3122/jabfm.2022.220171R1</pub-id><pub-id pub-id-type="medline">36948537</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Libon</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ng</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bailey</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hareendranathan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Joseph</surname><given-names>R</given-names> </name><name name-style="western"><surname>Dulai</surname><given-names>S</given-names> </name></person-group><article-title>Remote diagnostic imaging using artificial intelligence for 
diagnosing hip dysplasia in infants: results from a mixed-methods feasibility pilot study</article-title><source>Paediatr Child Health</source><year>2023</year><month>08</month><volume>28</volume><issue>5</issue><fpage>285</fpage><lpage>290</lpage><pub-id pub-id-type="doi">10.1093/pch/pxad013</pub-id><pub-id pub-id-type="medline">37484038</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sangers</surname><given-names>TE</given-names> </name><name name-style="western"><surname>Wakkee</surname><given-names>M</given-names> </name><name name-style="western"><surname>Moolenburgh</surname><given-names>FJ</given-names> </name><name name-style="western"><surname>Nijsten</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lugtenberg</surname><given-names>M</given-names> </name></person-group><article-title>Towards successful implementation of artificial intelligence in skin cancer care: a qualitative study exploring the views of dermatologists and general practitioners</article-title><source>Arch Dermatol Res</source><year>2023</year><month>07</month><volume>315</volume><issue>5</issue><fpage>1187</fpage><lpage>1195</lpage><pub-id pub-id-type="doi">10.1007/s00403-022-02492-3</pub-id><pub-id pub-id-type="medline">36477587</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Helenason</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ekstr&#x00F6;m</surname><given-names>C</given-names> </name><name name-style="western"><surname>Falk</surname><given-names>M</given-names> </name><name name-style="western"><surname>Papachristou</surname><given-names>P</given-names> </name></person-group><article-title>Exploring the feasibility of an artificial intelligence based clinical 
decision support system for cutaneous melanoma detection in primary care - a mixed method study</article-title><source>Scand J Prim Health Care</source><year>2024</year><month>03</month><volume>42</volume><issue>1</issue><fpage>51</fpage><lpage>60</lpage><pub-id pub-id-type="doi">10.1080/02813432.2023.2283190</pub-id><pub-id pub-id-type="medline">37982736</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kurniawan</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Handiyani</surname><given-names>H</given-names> </name><name name-style="western"><surname>Nuraini</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hariyati</surname><given-names>RTS</given-names> </name><name name-style="western"><surname>Sutrisno</surname><given-names>S</given-names> </name></person-group><article-title>A systematic review of artificial intelligence-powered (AI-powered) chatbot intervention for managing chronic illness</article-title><source>Ann Med</source><year>2024</year><month>12</month><volume>56</volume><issue>1</issue><fpage>2302980</fpage><pub-id pub-id-type="doi">10.1080/07853890.2024.2302980</pub-id><pub-id pub-id-type="medline">38466897</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Parikh</surname><given-names>RB</given-names> </name><name name-style="western"><surname>Teeple</surname><given-names>S</given-names> </name><name name-style="western"><surname>Navathe</surname><given-names>AS</given-names> </name></person-group><article-title>Addressing bias in artificial intelligence in health care</article-title><source>JAMA</source><year>2019</year><month>12</month><day>24</day><volume>322</volume><issue>24</issue><fpage>2377</fpage><lpage>2378</lpage><pub-id 
pub-id-type="doi">10.1001/jama.2019.18058</pub-id><pub-id pub-id-type="medline">31755905</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hogg</surname><given-names>HDJ</given-names> </name><name name-style="western"><surname>Al-Zubaidy</surname><given-names>M</given-names> </name><collab>Technology Enhanced Macular Services Study Reference Group</collab><etal/></person-group><article-title>Stakeholder perspectives of clinical artificial intelligence implementation: systematic review of qualitative evidence</article-title><source>J Med Internet Res</source><year>2023</year><month>01</month><day>10</day><volume>25</volume><fpage>e39742</fpage><pub-id pub-id-type="doi">10.2196/39742</pub-id><pub-id pub-id-type="medline">36626192</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kueper</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Terry</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Zwarenstein</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lizotte</surname><given-names>DJ</given-names> </name></person-group><article-title>Artificial intelligence and primary care research: a scoping review</article-title><source>Ann Fam Med</source><year>2020</year><month>05</month><volume>18</volume><issue>3</issue><fpage>250</fpage><lpage>258</lpage><pub-id pub-id-type="doi">10.1370/afm.2518</pub-id><pub-id pub-id-type="medline">32393561</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morley</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Murphy</surname><given-names>L</given-names> </name><name name-style="western"><surname>Mishra</surname><given-names>A</given-names> </name><name name-style="western"><surname>Joshi</surname><given-names>I</given-names> </name><name name-style="western"><surname>Karpathakis</surname><given-names>K</given-names> </name></person-group><article-title>Governing data and artificial intelligence for health care: developing an international understanding</article-title><source>JMIR Form Res</source><year>2022</year><month>01</month><day>31</day><volume>6</volume><issue>1</issue><fpage>e31623</fpage><pub-id pub-id-type="doi">10.2196/31623</pub-id><pub-id pub-id-type="medline">35099403</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Murdoch</surname><given-names>B</given-names> </name></person-group><article-title>Privacy and artificial intelligence: challenges for protecting health information in a new era</article-title><source>BMC Med Ethics</source><year>2021</year><month>09</month><day>15</day><volume>22</volume><issue>1</issue><fpage>122</fpage><pub-id pub-id-type="doi">10.1186/s12910-021-00687-3</pub-id><pub-id pub-id-type="medline">34525993</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Sides</surname><given-names>T</given-names> </name><name name-style="western"><surname>Farrell</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kbaier</surname><given-names>D</given-names> </name></person-group><article-title>Understanding the acceptance of artificial intelligence in primary care</article-title><year>2023</year><access-date>2026-01-22</access-date><conf-name>HCI International 2023 Posters - 25th International Conference on Human-Computer Interaction, 
HCII 2023</conf-name><conf-date>Jul 23-28, 2023</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://researchr.org/publication/hci-2023-43">https://researchr.org/publication/hci-2023-43</ext-link></comment></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="web"><article-title>EU AI act: first regulation on artificial intelligence</article-title><source>European Parliament</source><year>2023</year><access-date>2026-01-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence">https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence</ext-link></comment></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="web"><article-title>Artificial intelligence in software as a medical device</article-title><source>US Food and Drug Administration</source><year>2025</year><access-date>2026-01-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.fda.gov/medical-devices/software-medical-device-samd/artificial-intelligence-and-machine-learning-software-medical-device">https://www.fda.gov/medical-devices/software-medical-device-samd/artificial-intelligence-and-machine-learning-software-medical-device</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Search strategy.</p><media xlink:href="ai_v5i1e72210_app1.doc" xlink:title="DOC File, 39 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Codebook and themes.</p><media xlink:href="ai_v5i1e72210_app2.docx" xlink:title="DOCX File, 40 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Critical Appraisal Skills Programme (CASP) checklist for Qualitative 
Research.</p><media xlink:href="ai_v5i1e72210_app3.doc" xlink:title="DOC File, 53 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Evidence profile table of the Grading of Recommendations Assessment, Development, and Evaluation (GRADE)-Confidence in the Evidence from Reviews of Qualitative Research (CERQual) assessments.</p><media xlink:href="ai_v5i1e72210_app4.docx" xlink:title="DOCX File, 31 KB"/></supplementary-material><supplementary-material id="app5"><label>Checklist 1</label><p>Enhancing transparency in reporting the synthesis of qualitative research (ENTREQ) checklist.</p><media xlink:href="ai_v5i1e72210_app5.docx" xlink:title="DOCX File, 22 KB"/></supplementary-material><supplementary-material id="app6"><label>Checklist 2</label><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) checklist.</p><media xlink:href="ai_v5i1e72210_app6.doc" xlink:title="DOC File, 326 KB"/></supplementary-material><supplementary-material id="app7"><label>Checklist 3</label><p>PRISMA-S checklist.</p><media xlink:href="ai_v5i1e72210_app7.docx" xlink:title="DOCX File, 17 KB"/></supplementary-material></app-group></back></article>