<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v5i1e82351</article-id><article-id pub-id-type="doi">10.2196/82351</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Legal and Ethical Challenges in Integrating AI Into Clinical Practice: Qualitative Study of Physicians&#x2019; Real-World Experiences</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Mostafapour</surname><given-names>Mehrnaz</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Fortier</surname><given-names>Jacqueline</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Pacheco</surname><given-names>Karen</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Murray</surname><given-names>Heather</given-names></name><degrees>MSc, MD</degrees><xref ref-type="aff" 
rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Garber</surname><given-names>Gary</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff3">3</xref></contrib></contrib-group><aff id="aff1"><institution>The Canadian Medical Protective Association</institution><addr-line>875 Carling Ave Suite 323</addr-line><addr-line>Ottawa</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff2"><institution>Department of Emergency Medicine, Queen's University</institution><addr-line>Kingston</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff3"><institution>Department of Medicine and the School of Public Health and Epidemiology, Faculty of Medicine, University of Ottawa</institution><addr-line>Ottawa</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Dankar</surname><given-names>Fida</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Reiter</surname><given-names>Ehud</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Martinez-Angulo</surname><given-names>Pablo</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Mehrnaz Mostafapour, PhD, The Canadian Medical Protective Association, 875 Carling Ave Suite 323, Ottawa, ON, K1S 5P1, Canada, 1 (613) 725-2000; <email>mmostafapour@cmpa.org</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>31</day><month>3</month><year>2026</year></pub-date><volume>5</volume><elocation-id>e82351</elocation-id><history><date date-type="received"><day>13</day><month>08</month><year>2025</year></date><date 
date-type="rev-recd"><day>14</day><month>01</month><year>2026</year></date><date date-type="accepted"><day>16</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Mehrnaz Mostafapour, Jacqueline Fortier, Karen Pacheco, Heather Murray, Gary Garber. Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 31.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2026/1/e82351"/><abstract><sec><title>Background</title><p>The adoption of artificial intelligence (AI) in health care has accelerated; however, physicians continue to face substantial legal, ethical, and regulatory uncertainties when considering AI integration into clinical practice. 
Although the literature on AI in health care is expanding, there is limited insight into the real-world concerns voiced by clinicians navigating these uncharted territories.</p></sec><sec><title>Objective</title><p>This study aimed to explore the legal and ethical uncertainties raised by Canadian physicians in relation to AI use in clinical care, using actual medicolegal advice requests as a window into their practical concerns.</p></sec><sec sec-type="methods"><title>Methods</title><p>We conducted a comprehensive thematic analysis of 46 medicolegal advice cases made by physicians to a national medicolegal advisory service between March 2023 and February 2025. The cases were analyzed to identify key themes and patterns in physicians&#x2019; questions and perceived risks regarding AI tools in clinical contexts.</p></sec><sec sec-type="results"><title>Results</title><p>Eight key themes emerged, including the use of AI scribes, data privacy and security, patient consent, data ownership, regulatory uncertainty, medicolegal liability, vendor agreements, and concerns about accuracy and bias. Many of the inquiries focused on administrative and documentation-related AI applications rather than on diagnostic tools, reflecting the current stage of AI integration in everyday clinical workflows. 
Physicians expressed uncertainty regarding legal responsibility, alignment with privacy laws, and appropriate communication with patients about AI use.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>This study offers unique insight into frontline physicians&#x2019; real-time concerns about AI, highlighting the need for clearer regulatory guidance, clinical standards, and legal frameworks to support safe and ethical AI adoption in health care.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>AI</kwd><kwd>medicolegal</kwd><kwd>ethics</kwd><kwd>physicians</kwd><kwd>clinical practice</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The integration of artificial intelligence (AI) into health care represents a major shift, offering the potential to improve diagnostic accuracy, enhance efficiency, and support more personalized care [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. However, despite years of development and early-stage implementation, adoption in clinical practice remains inconsistent. Ethical, regulatory, and medicolegal uncertainties continue to limit the widespread use of AI in health care [<xref ref-type="bibr" rid="ref3">3</xref>]. Physicians, as one of the primary users of these tools, face substantial uncertainty as they try to navigate the use of AI within systems where legal frameworks and professional guidelines have not kept pace with technological progress [<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>Previous studies have identified a wide range of barriers to AI adoption, including technical challenges, organizational obstacles, and cultural resistance within clinical environments [<xref ref-type="bibr" rid="ref3">3</xref>]. Concerns about the accuracy, reliability, and robustness of AI-generated results remain major barriers to adoption in health care. 
While some models approach the diagnostic performance of physicians, their accuracy is often inconsistent [<xref ref-type="bibr" rid="ref5">5</xref>]. Many AI tools lack validation in real-world settings, raising concerns about their generalizability and safety [<xref ref-type="bibr" rid="ref6">6</xref>]. Physicians are particularly wary of the <italic>black box</italic> nature of machine learning [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref7">7</xref>], which obscures decision-making processes and hinders trust [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Concerns about algorithmic bias, data security, and the absence of transparent validation further contribute to clinicians&#x2019; hesitation to rely on these tools [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref10">10</xref>].</p><p>Legal and regulatory uncertainty continues to hinder AI adoption. In Canada, the lack of AI-specific legislation puts physicians in a situation where they must consider applying existing laws, including those related to civil liability, privacy, and human rights. Courts may continue to focus on traditional defendants (ie, clinicians and hospitals), potentially leaving health care providers responsible for AI-related failures [<xref ref-type="bibr" rid="ref11">11</xref>]. Questions remain about patient consent, data sharing, and transparency obligations under current federal and provincial privacy laws, such as the federal Personal Information Protection and Electronic Documents Act and provincial and territorial privacy statutes. These laws apply broadly to the collection, primary use, secondary use, disclosure, and deidentification of personal health information, as well as to situations involving cross-border data transfers and third-party vendor contracts [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. 
This fragmented framework increases compliance challenges, particularly for AI tools using cloud-based infrastructure located in other jurisdictions [<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>Although previous research has explored barriers to AI adoption primarily through general surveys and theoretical models [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref3">3</xref>], fewer studies have examined the practical, real-world challenges physicians face when AI is introduced into the day-to-day realities of medical practice. Little is known about the specific operational, legal, and professional dilemmas that arise at the point of care. This study addresses that gap by analyzing questions and concerns submitted by physicians to a medicolegal helpline, offering direct insight into the issues clinicians encounter in practice. Grounding our analysis in these real-world inquiries allows us to identify areas where policy, education, and AI development could better align with physicians&#x2019; legal obligations, professional standards, and patient safety.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design and Setting</title><p>We conducted a thematic analysis of physician cases related to the use of AI in clinical practice, submitted to the Canadian Medical Protective Association&#x2019;s (CMPA) medicolegal helpline between March 1, 2023, and February 28, 2025. The CMPA, a national medicolegal defense organization serving about 95% of practicing physicians in Canada, provides members with access to medicolegal advice and support through a dedicated helpline. During this period, CMPA physician advisors received 46 cases from members seeking guidance or raising concerns about the use of AI in their practice. 
These cases offered insights into physicians&#x2019; medicolegal and practice-related concerns and considerations regarding AI.</p></sec><sec id="s2-2"><title>Study Population</title><p>Practicing physicians who contacted the CMPA for advice on the use of AI in their practice during the study period were included in the study.</p></sec><sec id="s2-3"><title>Data Source</title><p>Data collection from CMPA cases has been described previously [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. When physicians contact the CMPA, a physician advisor documents the purpose of the call and relevant information in a written memo. These memos are structured narrative summaries that capture the reason for the call, relevant clinical or practice context, and the physician&#x2019;s questions or concerns, but they are not verbatim transcripts and contain no direct quotations. For this study, we analyzed memos from advice cases concerning the use of AI in clinical practice during the study period.</p><p>The unit of analysis was the coded medicolegal advice case, defined as 1 or more calls documented within a single advice file; therefore, the same physician could contribute more than 1 advice case over the study period. CMPA recorded 12,135 coded medicolegal advice cases during this period, of which 275 were coded as technology issues related to practice. Within this group, 46 cases met our definition of AI-related advice cases and were included in the analysis. AI-related cases were identified by reviewing physician advisor notes and selecting those in which the physician&#x2019;s primary questions concerned AI tools used in clinical practice. 
For this study, we included only tools explicitly described as &#x201C;artificial intelligence&#x201D; (eg, AI scribes with speech recognition and generative summarization, large language model&#x2013;based applications, or AI-supported decision tools) and excluded templating or basic speech-to-text systems without machine learning or generative components.</p></sec><sec id="s2-4"><title>Data Analysis</title><p>We conducted an inductive thematic analysis of advice case memos, following the approach described by Braun and Clarke [<xref ref-type="bibr" rid="ref14">14</xref>]. Our goal was to identify themes related to physicians&#x2019; questions and concerns about the use of AI in clinical practice, as documented by physician advisors at the CMPA.</p><p>We attended to how physicians&#x2019; questions and perceived risks were described in the cases. For example, an excerpt in which a physician was &#x201C;unsure whether they needed explicit patient permission before using the AI scribe&#x201D; was coded as &#x201C;uncertainty/question about consent process,&#x201D; grouped under the subtheme &#x201C;scope of consent for AI scribes,&#x201D; within the broader theme &#x201C;patient consent and communication.&#x201D;</p><p>The data consisted of physician advisor summaries rather than verbatim transcripts and contained no direct quotations. Our analysis was based on the physicians&#x2019; questions and perceived risks, as documented in these second-order summaries.</p><p>One researcher (MM) conducted the initial coding of the entire dataset (46 cases) to develop a preliminary set of codes and build familiarity with the data. To support analytic calibration and clarify code definitions, a second researcher independently coded a subset (n=23, 50%) of cases. Following line-by-line comparison, percent agreement [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>] was 97%, indicating a high level of concordance at this calibration stage. 
Coding differences were resolved through discussion and refinement of code definitions, resulting in a shared coding framework. The primary coder then recoded the full dataset using this refined framework to ensure consistent application across all cases. Throughout the process, codes were iteratively reviewed and grouped into subthemes and overarching themes through ongoing discussion between the researchers.</p><p>Theme labels were primarily data-led and then refined using terminology consistent with medicolegal and AI ethics literature. The primary coder had a background in engineering and public health, and the secondary coder had a background in science and biology; both had more than 3 years of work experience in a medicolegal organization. After the coding framework and themes were developed, we sought feedback from CMPA&#x2019;s legal advisory board to ensure that the themes and subthemes were consistent with current medicolegal principles. This process led to minor wording adjustments but no substantive changes to the analysis.</p></sec><sec id="s2-5"><title>Ethical Considerations</title><p>The ethics review panel of the Advarra Institutional Review Board provided ethics approval for the conduct of this study (protocol number for secondary use of data 00020829). Before analysis, the memos were deidentified in accordance with organizational guidelines by removing or generalizing names, cities, and specific organizations (eg, referring to provincial regulatory authorities as &#x201C;the College&#x201D;). The medicolegal memos did not contain patient-identifiable information, and no patient-identifiable data were included in the research dataset. The deidentified data were then analyzed and reported only in aggregated form to minimize any residual risk of reidentification. Deidentified data were stored in CMPA&#x2019;s secure data hub for a minimum of 10 years in accordance with CMPA&#x2019;s retention and disposition policy. 
Compensation to participants was not applicable, as this was a retrospective study. Members of the CMPA are informed at enrollment that their deidentified information may be used for internal research, with an option to opt out, and the Advarra Institutional Review Board approved this minimal-risk secondary use without requiring additional individual consent.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>A total of 46 AI-related cases were received by CMPA during the study period, with 22 (47.8%) cases in 2023 and 24 (52.2%) cases in 2024 (no AI-related cases were recorded in January 2025 or February 2025); in total, 40 (87%) cases were from family medicine specialists. Physicians contacted the service from multiple provinces, with most cases (n=18, 39.1%) originating from Ontario, Canada&#x2019;s most populous province. This was followed by Quebec at 10 (21.7%), and British Columbia at 8 (17.4%). Alberta contributed 6 (13%) cases. Cases from Manitoba, Saskatchewan, New Brunswick, Nova Scotia, Newfoundland and Labrador, and the Northwest Territories of Canada together accounted for 4 (8.7%) AI-related advice cases.</p></sec><sec id="s3-2"><title>Thematic Analysis of the Advice Cases</title><p>The thematic analysis of the advice cases identified 8 distinct themes, reflecting physicians&#x2019; questions and concerns regarding the use of AI in clinical practice. These themes ranged from technical and regulatory considerations to ethical and medicolegal concerns. <xref ref-type="fig" rid="figure1">Figure 1</xref> outlines each theme alongside its frequency of occurrence within the dataset. 
Several cases encompassed multiple themes, highlighting physicians&#x2019; concerns and uncertainty about navigating the medicolegal and practical challenges of using AI in clinical care.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Themes and their frequency of occurrence in the dataset (N=46). AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e82351_fig01.png"/></fig></sec><sec id="s3-3"><title>Implementation of AI Scribes in Clinical Practice</title><p>Physicians frequently sought guidance on integrating AI tools, particularly for scribing support, into their clinical workflows. Some inquired about using AI scribes to streamline note-taking and reduce the time spent on documentation. Others focused on clarifying the legal implications and professional accountability associated with AI-generated notes. There were also questions about potential risks, including inaccuracies in AI-transcribed records and the possibility of overreliance on automated documentation.</p></sec><sec id="s3-4"><title>Privacy, Confidentiality, and Data Security</title><p>Concerns about patient data protection and privacy requirements were a common theme. Physicians inquired about safeguarding patient information when using AI tools, especially when data handling involved servers or data processing activities located outside Canada. For example, some cases involved physicians developing AI tools who had questions about confidentiality and compliance with Canadian privacy laws. Other cases focused on privacy concerns related to the use of existing AI tools to manage electronic medical records (EMRs).</p></sec><sec id="s3-5"><title>Patient Consent and Communication</title><p>Physicians expressed uncertainty about the requirements for obtaining patient consent when using AI in clinical care. 
Questions addressed whether consent was necessary, whether it should be verbal or written, and how to proceed if consent had not been obtained. Some cases focused on the potential medicolegal risks of using AI tools without explicit consent, while others raised concerns about specific applications, such as AI-based transcription programs, and how these might affect the accuracy of medical records and related legal implications.</p></sec><sec id="s3-6"><title>Data Retention and Custodianship</title><p>Physicians raised questions about how AI-generated data should be stored and the appropriate length of retention. Inquiries sought clarification on data retention obligations and the risks of storing information on servers located outside Canada. Some cases questioned whether draft recordings or documents created outside the EMR were considered part of the medical record and whether physicians were required to provide them upon request. Others expressed concern about the risks of collecting and processing patient data on AI servers based in other countries.</p></sec><sec id="s3-7"><title>Regulatory Compliance and Professional Standards</title><p>Physicians sought clarification on how AI use aligned with professional standards and legal requirements. Questions included whether adherence to foreign laws or regulations, such as HIPAA (Health Insurance Portability and Accountability Act), met their obligations under Canadian law and whether internal processes and contracts met the expectations of regulatory bodies such as provincial and territorial medical regulatory authorities (Colleges). For example, physicians questioned whether a HIPAA-compliant, AI-supported Subjective, Objective, Assessment, and Plan note system would also meet Canadian privacy legislation requirements. Others inquired whether their user agreements for developing AI tools for clinical use were sufficient to meet applicable regulatory requirements. 
Physicians raised concerns about both electronic health record&#x2013;embedded AI tools and stand-alone software-as-a-service products used alongside the EMR; however, the memos did not provide enough detail to systematically distinguish concerns by vendor type, and callers did not reference specific provincial or College guidance by name.</p></sec><sec id="s3-8"><title>Medicolegal Risks and Liability</title><p>Physicians expressed concerns about potential legal risks associated with AI tools, including the possibility of negligence claims, College complaints, and broader medicolegal exposure. Some sought advice after developing AI-like tools, asking whether their use could create legal liability. Others raised concerns about the use of AI software in walk-in clinics to gather patient information and the associated medicolegal implications.</p></sec><sec id="s3-9"><title>Vendor Contracts and Legal Considerations</title><p>Physicians questioned whether they should engage legal counsel to review contractual terms and ensure compliance with privacy and professional standards. Physicians wanted to ensure that their template legal agreement adequately addressed risks associated with processing patient data. 
Some, eager to implement an AI service, sought guidance on resources for evaluating a vendor&#x2019;s contract and ensuring its legal soundness.</p><p>Within this theme, physicians most often sought advice on whether their contracts needed to address the following: who would own and be custodian of both source and AI-generated data; where data would be stored or processed, including whether cross-border transfers created additional medicolegal risk; how long data would be retained and under what conditions it would be deleted or returned; whether, and to what extent, they should be concerned about third-party access to the data and the safeguards governing such access; and whether provisions allocating responsibility and indemnity would adequately protect them in the event of a privacy breach or AI-related error. These questions were framed in terms of how such clauses might affect the physician&#x2019;s medicolegal risk.</p></sec><sec id="s3-10"><title>Accuracy, Reliability, and Quality Assurance</title><p>Some physicians expressed concerns about the accuracy and reliability of AI-generated documentation, including the potential for misinterpretation or bias in clinical notes. Physicians sought guidance on using an AI-assisted application for clinical notes and raised specific concerns about the risks of misinterpretation or bias in AI-generated transcriptions. Although this concern appeared less frequently, it highlights physicians&#x2019; awareness of the clinical risks associated with overreliance on AI tools.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This qualitative analysis of physicians&#x2019; inquiries to the CMPA provides valuable insight into the uncertainties and perceived risks clinicians face when considering the adoption of AI tools in clinical practice. 
While physicians recognized the potential of AI to streamline documentation, improve workflow efficiency, and support various aspects of patient care, they also voiced significant concerns about privacy, patient consent, data governance, and medicolegal liability. Compared with existing medicolegal literature, our findings both confirm and extend previous work. Survey and interview studies show that clinicians are broadly concerned about medicolegal liability, privacy risks, and the possibility of becoming &#x201C;liability sinks&#x201D; when using AI tools [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Our real-world data support these concerns but also provide more operational detail than previous studies. Rather than expressing only general worries, physicians asked concrete questions about contractual vulnerabilities, such as whether vendor agreements adequately addressed cross-border data processing, indemnity, and compliance with Canadian privacy law. This level of detail has not been reported in previous clinician surveys or qualitative interviews and represents a key contribution of this practice-based dataset [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref16">16</xref>].</p><p>Our analysis found that implementation-related inquiries dominated the cases, particularly those focusing on AI scribes. This suggests a growing interest in solutions to mitigate administrative burden, aligning with recent evidence indicating that AI scribes can alleviate documentation workload and reduce physician burnout. A 2024 rapid review reported reductions in both mental demand and frustration when AI scribes were used, suggesting tangible benefits for clinical efficiency [<xref ref-type="bibr" rid="ref17">17</xref>]. 
Despite this promise, the cases in our study reflected physicians&#x2019; persistent uncertainty about the practical steps of implementation, including how to integrate AI into existing workflows and how to manage potential medicolegal implications. These concerns were also acknowledged in the same review, which emphasized the need for thoughtful planning, clinician training, and technical support to ensure safe and effective AI integration [<xref ref-type="bibr" rid="ref17">17</xref>]. Our findings also diverge from the literature regarding consent for AI scribes. Although several Colleges now mandate informed consent for AI-supported documentation, previous studies have not explored how clinicians implement these requirements in practice.</p><p>Patient-centered research shows that patients expect to be informed about AI use and may view such disclosures as equally important as traditional risk discussions [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. However, patient perspectives are shaped by factors such as gender, age, income, and health status [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>], complicating efforts to establish standard consent processes and highlighting the need for tailored communication. Many medical regulatory bodies, including the College of Physicians and Surgeons of Nova Scotia, mandate consent for recordings involving AI scribes [<xref ref-type="bibr" rid="ref21">21</xref>]. However, despite emerging guidance, gaps remain in defining comprehensive consent practices for AI, particularly as patient apprehensions about safety, choice, and data security persist [<xref ref-type="bibr" rid="ref19">19</xref>]. 
Our findings highlight ongoing uncertainty among physicians and the critical need for clearer, practice-oriented frameworks to support consent in AI integration, suggesting a gap between regulatory expectations and frontline understanding that has not been described in previous medicolegal or implementation studies [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref22">22</xref>].</p><p>Physicians&#x2019; cases also demonstrated concern regarding privacy and data security, particularly about the handling of patient information by AI systems, cross-border data transfers, and reliance on servers outside Canada. Comparative analyses of privacy frameworks in Canada, the United States, and other jurisdictions show cross-border data flow as a critical regulatory issue [<xref ref-type="bibr" rid="ref18">18</xref>]. Furthermore, physicians&#x2019; distinction between US HIPAA compliance and Canadian privacy law highlights their nuanced understanding of professional and legal obligations and the limitations of relying solely on international frameworks [<xref ref-type="bibr" rid="ref23">23</xref>]. However, there remains a need for clear guidelines and policies to address similar problems.</p><p>Questions about data governance and custodianship revealed areas of uncertainty, particularly concerning the legal status of draft notes and recordings generated by AI systems. While existing literature discusses data governance at a high level, it does not address whether transient AI-generated artifacts constitute part of the medical record or trigger retention obligations.</p><p>Physicians were unclear about whether such records fall under their record-keeping responsibilities, an ambiguity that reflects wider regulatory gaps in Canadian health care. 
Existing analyses of AI regulation in Canada have highlighted that, alongside safety considerations, AI technologies raise unresolved questions about data ownership and retention [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Our analysis surfaces these previously underreported uncertainties, highlighting the added value of real-time physician inquiries in identifying emergent medicolegal challenges not visible in survey-based or interview-based studies.</p><p>Medicolegal risk and liability associated with AI systems emerged as another prominent theme, with physicians expressing concern about responsibility for AI-related errors and regulatory breaches. Although current case law provides limited guidance, the potential for legal exposure remains a significant deterrent to adoption. Our cases reflected a focus on medicolegal risks rather than technical issues (eg, reliability or accuracy), likely because the CMPA is a medicolegal organization. Our results suggest that legal and operational uncertainties are a barrier to AI integration in physicians&#x2019; clinical practice. Most of the cases in our study came from family medicine specialists, and most of the AI tools they sought advice about were administrative rather than clinical, such as AI scribes. For this reason, themes centered more on issues related to patient privacy and consent for the use of AI tools than on issues related to diagnostic performance. We observed only a few cases about diagnostic AI technologies and issues related to diagnostic accuracy, reliability, and bias, which may be due to fewer cases from specialties such as radiology in our dataset. We believe this focus is reflective of the types of cases we received and may not be generalizable. 
Nevertheless, issues related to diagnostic accuracy, reliability, and bias remain critical and should be addressed through regulation and standardization as AI becomes more integrated into health care.</p><p>Addressing these concerns requires coordinated action. Physicians and health care institutions should establish formal evaluation processes for AI tools, ensuring that privacy protections, consent mechanisms, and data governance policies are firmly in place before adoption [<xref ref-type="bibr" rid="ref25">25</xref>]. Communication strategies should be developed to help physicians explain AI use clearly to patients, with sensitivity to demographic variations in preferences [<xref ref-type="bibr" rid="ref18">18</xref>].</p><p>Regulatory bodies have a pivotal role in supporting safe AI adoption. Clear, AI-specific privacy guidelines should address the complexities of cross-border data transfers and third-party data access. Standardized consent frameworks are also essential, balancing patient autonomy with practical clinical workflows. Moreover, regulatory interpretations of safety must expand to encompass AI-specific risks, including algorithmic bias and opaque decision-making processes. Educational resources should be developed to guide physicians in understanding how AI use intersects with existing professional obligations [<xref ref-type="bibr" rid="ref26">26</xref>].</p><p>For technology developers, there should be a clear mandate to design AI tools that align with Canadian privacy law and support physicians&#x2019; ethical and professional responsibilities. 
Features that document consent, ensure transparent data handling, and provide robust error detection will be critical to facilitating safe integration into clinical practice.</p><p>In summary, our study is among the first exploratory efforts to highlight the multifaceted concerns of Canadian physicians as they navigate the complexities of AI adoption in health care, grounded in their real, day-to-day experiences. It identifies gaps in guidance and technology design essential to support the responsible integration of AI into clinical practice. Our findings suggest that, before adopting AI scribes or similar tools, physicians and organizations should consider the following governance questions: who is the data controller or custodian for both source and AI-generated data; what types of data are retained (eg, audio files, draft transcripts, and final notes) and for how long; where data are stored and processed, including any cross-border transfers; how patient consent is obtained and evidenced in the record (verbal vs written); how errors, bias, reliability, and accuracy in AI outputs are monitored and addressed; and which contractual clauses (eg, privacy safeguards, breach notification, data return or destruction, and indemnity) are in place to mitigate medicolegal risk.</p></sec><sec id="s4-2"><title>Limitations and Future Directions</title><p>While this study provides timely insights into physicians&#x2019; concerns about AI adoption, it is limited to inquiries submitted to a single national medicolegal advisory service and predominantly from family medicine specialists. As such, it may not fully capture the dynamics in larger institutions, such as hospitals or health systems, where technology adoption decisions are often made at an organizational level rather than by individual practitioners. 
Most advice cases concerned documentation-focused AI scribes and related administrative tools, and advisor memos did not consistently provide sufficient technical detail to distinguish transcription-only systems from AI scribes with generative functions. Because the data consist of advisor-written summaries rather than verbatim transcripts, medicolegal documentation conventions may foreground risk and liability and underrepresent clinical nuance or emotional tone, which may have influenced the themes identified. Given that our dataset included only AI-related concerns, future research could compare AI-related inquiries with non-AI software inquiries (eg, image-recording or EMR systems) to assess whether consent and privacy issues differ across technologies. Future work could also examine a broader range of physicians&#x2019; real-world experiences with AI implementation, as well as patient perspectives on AI use in health care settings. Longitudinal research would be valuable in tracking how physicians&#x2019; concerns evolve with increasing familiarity and use of AI tools. 
Additionally, further exploration of how regulatory frameworks adapt to the unique challenges of AI in health care will be essential for informing safe, ethical, and effective technology integration.</p></sec></sec></body><back><ack><p>The authors thank Chantz Strong, Brian Andrews, and Henry Lam for their assistance with various aspects of this study.</p></ack><notes><sec><title>Funding</title><p>The study was conducted using internal funding from the Canadian Medical Protective Association.</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are not publicly available due to ethical and privacy restrictions but are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CMPA</term><def><p>Canadian Medical Protective Association</p></def></def-item><def-item><term id="abb3">EMR</term><def><p>electronic medical record</p></def></def-item><def-item><term id="abb4">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chalutz-Ben Gal</surname><given-names>H</given-names> </name><name name-style="western"><surname>Margherita</surname><given-names>A</given-names> </name></person-group><article-title>The adoption of artificial intelligence (AI) in healthcare: a model of value assessment, human resource and health system factors</article-title><source>Technol Anal Strateg Manag</source><year>2025</year><volume>37</volume><issue>13</issue><fpage>4662</fpage><lpage>4675</lpage><pub-id 
pub-id-type="doi">10.1080/09537325.2025.2467928</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahmed</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Spooner</surname><given-names>B</given-names> </name><name name-style="western"><surname>Isherwood</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lane</surname><given-names>M</given-names> </name><name name-style="western"><surname>Orrock</surname><given-names>E</given-names> </name><name name-style="western"><surname>Dennison</surname><given-names>A</given-names> </name></person-group><article-title>A systematic review of the barriers to the implementation of artificial intelligence in healthcare</article-title><source>Cureus</source><year>2023</year><month>10</month><day>4</day><volume>15</volume><issue>10</issue><fpage>e46454</fpage><pub-id pub-id-type="doi">10.7759/cureus.46454</pub-id><pub-id pub-id-type="medline">37927664</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hassan</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kushniruk</surname><given-names>A</given-names> </name><name name-style="western"><surname>Borycki</surname><given-names>E</given-names> </name></person-group><article-title>Barriers to and facilitators of artificial intelligence adoption in health care: scoping review</article-title><source>JMIR Hum Factors</source><year>2024</year><month>08</month><day>29</day><volume>11</volume><fpage>e48633</fpage><pub-id pub-id-type="doi">10.2196/48633</pub-id><pub-id pub-id-type="medline">39207831</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="web"><article-title>The medico-legal lens on AI use by Canadian 
physicians</article-title><source>Canadian Medical Protective Association</source><year>2024</year><access-date>2026-02-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cmpa-acpm.ca/en/research-policy/public-policy/the-medico-legal-lens-on-ai-use-by-canadian-physicians">https://www.cmpa-acpm.ca/en/research-policy/public-policy/the-medico-legal-lens-on-ai-use-by-canadian-physicians</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aggarwal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sounderajah</surname><given-names>V</given-names> </name><name name-style="western"><surname>Martin</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Diagnostic accuracy of deep learning in medical imaging: a systematic review and meta-analysis</article-title><source>NPJ Digit Med</source><year>2021</year><month>04</month><day>7</day><volume>4</volume><issue>1</issue><fpage>65</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00438-z</pub-id><pub-id pub-id-type="medline">33828217</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abdullah</surname><given-names>YI</given-names> </name><name name-style="western"><surname>Schuman</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Shabsigh</surname><given-names>R</given-names> </name><name name-style="western"><surname>Caplan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Al-Aswad</surname><given-names>LA</given-names> </name></person-group><article-title>Ethics of artificial intelligence in medicine and ophthalmology</article-title><source>Asia Pac J Ophthalmol 
(Phila)</source><year>2021</year><volume>10</volume><issue>3</issue><fpage>289</fpage><lpage>298</lpage><pub-id pub-id-type="doi">10.1097/APO.0000000000000397</pub-id><pub-id pub-id-type="medline">34383720</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ayorinde</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mensah</surname><given-names>DO</given-names> </name><name name-style="western"><surname>Walsh</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Health care professionals&#x2019; experience of using AI: systematic review with narrative synthesis</article-title><source>J Med Internet Res</source><year>2024</year><month>10</month><day>30</day><volume>26</volume><fpage>e55766</fpage><pub-id pub-id-type="doi">10.2196/55766</pub-id><pub-id pub-id-type="medline">39476382</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Powers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vogeli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mullainathan</surname><given-names>S</given-names> </name></person-group><article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title><source>Science</source><year>2019</year><month>10</month><day>25</day><volume>366</volume><issue>6464</issue><fpage>447</fpage><lpage>453</lpage><pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id><pub-id pub-id-type="medline">31649194</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Rajkomar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hardt</surname><given-names>M</given-names> </name><name name-style="western"><surname>Howell</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Corrado</surname><given-names>G</given-names> </name><name name-style="western"><surname>Chin</surname><given-names>MH</given-names> </name></person-group><article-title>Ensuring fairness in machine learning to advance health equity</article-title><source>Ann Intern Med</source><year>2018</year><month>12</month><day>18</day><volume>169</volume><issue>12</issue><fpage>866</fpage><lpage>872</lpage><pub-id pub-id-type="doi">10.7326/M18-1990</pub-id><pub-id pub-id-type="medline">30508424</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>B</given-names> </name><name name-style="western"><surname>Cai</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Acceptance of clinical artificial intelligence among physicians and medical students: a systematic review with cross-sectional survey</article-title><source>Front Med (Lausanne)</source><year>2022</year><month>08</month><day>31</day><volume>9</volume><fpage>990604</fpage><pub-id pub-id-type="doi">10.3389/fmed.2022.990604</pub-id><pub-id pub-id-type="medline">36117979</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lawton</surname><given-names>T</given-names> </name><name name-style="western"><surname>Morgan</surname><given-names>P</given-names> </name><name name-style="western"><surname>Porter</surname><given-names>Z</given-names> 
</name><etal/></person-group><article-title>Clinicians risk becoming &#x201C;liability sinks&#x201D; for artificial intelligence</article-title><source>Future Healthc J</source><year>2024</year><month>02</month><volume>11</volume><issue>1</issue><fpage>100007</fpage><pub-id pub-id-type="doi">10.1016/j.fhj.2024.100007</pub-id><pub-id pub-id-type="medline">38646041</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Hendry</surname><given-names>M</given-names> </name></person-group><article-title>AI tools in healthcare sector rife with legal and ethical risk that must be mitigated</article-title><source>Lexpert</source><year>2024</year><access-date>2026-02-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.lexpert.ca/news/technology-law/ai-tools-in-healthcare-sector-rife-with-legal-and-ethical-risk-that-must-be-mitigated/386725">https://www.lexpert.ca/news/technology-law/ai-tools-in-healthcare-sector-rife-with-legal-and-ethical-risk-that-must-be-mitigated/386725</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McDougall</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zaslow</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>C</given-names> </name><etal/></person-group><article-title>The medico-legal helpline: a content analysis of postgraduate medical trainee advice calls</article-title><source>Med Educ</source><year>2021</year><month>03</month><volume>55</volume><issue>3</issue><fpage>387</fpage><lpage>393</lpage><pub-id pub-id-type="doi">10.1111/medu.14374</pub-id><pub-id pub-id-type="medline">32931068</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>Using thematic analysis in psychology</article-title><source>Qual Res Psychol</source><year>2006</year><volume>3</volume><issue>2</issue><fpage>77</fpage><lpage>101</lpage><pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Miles</surname><given-names>MB</given-names> </name><name name-style="western"><surname>Huberman</surname><given-names>AM</given-names> </name></person-group><source>Qualitative Data Analysis: An Expanded Sourcebook</source><year>1994</year><edition>2</edition><publisher-name>SAGE Publications</publisher-name></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jones</surname><given-names>C</given-names> </name><name name-style="western"><surname>Thornton</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wyatt</surname><given-names>JC</given-names> </name></person-group><article-title>Artificial intelligence and clinical decision support: clinicians&#x2019; perspectives on trust, trustworthiness, and liability</article-title><source>Med Law Rev</source><year>2023</year><month>11</month><day>27</day><volume>31</volume><issue>4</issue><fpage>501</fpage><lpage>520</lpage><pub-id pub-id-type="doi">10.1093/medlaw/fwad013</pub-id><pub-id pub-id-type="medline">37218368</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name 
name-style="western"><surname>Sasseville</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yousefi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Ouellet</surname><given-names>S</given-names> </name><name name-style="western"><surname>Stefan</surname><given-names>T</given-names> </name><name name-style="western"><surname>Carnovale</surname><given-names>V</given-names> </name><name name-style="western"><surname>Bergeron</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Impacts of AI scribes on clinical outcomes, efficiency, and documentation</article-title><year>2024</year><access-date>2026-02-28</access-date><publisher-name>SPOR Evidence Alliance</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://sporevidencealliance.ca/wp-content/uploads/2024/11/SPOREA_AI_SCRIBE_FINAL_Report.pdf">https://sporevidencealliance.ca/wp-content/uploads/2024/11/SPOREA_AI_SCRIBE_FINAL_Report.pdf</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>HJ</given-names> </name></person-group><article-title>Patient perspectives on informed consent for medical AI: a web-based experiment</article-title><source>Digit Health</source><year>2024</year><month>04</month><day>30</day><volume>10</volume><fpage>20552076241247938</fpage><pub-id pub-id-type="doi">10.1177/20552076241247938</pub-id><pub-id pub-id-type="medline">38698829</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Agarwal</surname><given-names>P</given-names> </name><name name-style="western"><surname>Lall</surname><given-names>R</given-names> </name><name name-style="western"><surname>Girdhari</surname><given-names>R</given-names> 
</name></person-group><article-title>Artificial intelligence scribes in primary care</article-title><source>CMAJ</source><year>2024</year><month>09</month><day>15</day><volume>196</volume><issue>30</issue><fpage>E1042</fpage><pub-id pub-id-type="doi">10.1503/cmaj.240363</pub-id><pub-id pub-id-type="medline">39284604</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Richardson</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>C</given-names> </name><name name-style="western"><surname>Curtis</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Patient apprehensions about the use of artificial intelligence in healthcare</article-title><source>NPJ Digit Med</source><year>2021</year><month>09</month><day>21</day><volume>4</volume><issue>1</issue><fpage>140</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00509-1</pub-id><pub-id pub-id-type="medline">34548621</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moy</surname><given-names>S</given-names> </name><name name-style="western"><surname>Irannejad</surname><given-names>M</given-names> </name><name name-style="western"><surname>Manning</surname><given-names>SJ</given-names> </name><etal/></person-group><article-title>Patient perspectives on the use of artificial intelligence in health care: a scoping review</article-title><source>J Patient Cent Res Rev</source><year>2024</year><month>04</month><day>2</day><volume>11</volume><issue>1</issue><fpage>51</fpage><lpage>62</lpage><pub-id pub-id-type="doi">10.17294/2330-0698.2029</pub-id><pub-id pub-id-type="medline">38596349</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation 
citation-type="web"><article-title>Professional guidelines regarding artificial intelligence (AI) scribes in clinical care</article-title><source>College of Physicians and Surgeons of Nova Scotia</source><year>2025</year><access-date>2026-02-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://cpsns.ns.ca/registrants/physicians/standards-guidelines/artificial-intelligence-ai-scribes-in-clinical-care/">https://cpsns.ns.ca/registrants/physicians/standards-guidelines/artificial-intelligence-ai-scribes-in-clinical-care/</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yadav</surname><given-names>N</given-names> </name><name name-style="western"><surname>Pandey</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dudani</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rangarajan</surname><given-names>K</given-names> </name></person-group><article-title>Data privacy in healthcare: in the era of artificial intelligence</article-title><source>Indian Dermatol Online J</source><year>2023</year><month>10</month><day>27</day><volume>14</volume><issue>6</issue><fpage>788</fpage><lpage>792</lpage><pub-id pub-id-type="doi">10.4103/idoj.idoj_543_23</pub-id><pub-id pub-id-type="medline">38099022</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharma</surname><given-names>K</given-names> </name></person-group><article-title>Data privacy and security in AI-driven healthcare: a critical analysis</article-title><source>Int J Sustain Dev Comput 
Sci</source><year>2023</year><access-date>2026-02-28</access-date><volume>5</volume><issue>3</issue><comment><ext-link ext-link-type="uri" xlink:href="https://ijsdcs.com/index.php/ijsdcs/article/view/416">https://ijsdcs.com/index.php/ijsdcs/article/view/416</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Da Silva</surname><given-names>M</given-names> </name><name name-style="western"><surname>Flood</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Goldenberg</surname><given-names>A</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>D</given-names> </name></person-group><article-title>Regulating the safety of health-related artificial intelligence</article-title><source>Healthc Policy</source><year>2022</year><month>05</month><volume>17</volume><issue>4</issue><fpage>63</fpage><lpage>77</lpage><pub-id pub-id-type="doi">10.12927/hcpol.2022.26824</pub-id><pub-id pub-id-type="medline">35686827</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Murdoch</surname><given-names>B</given-names> </name><name name-style="western"><surname>Jandura</surname><given-names>A</given-names> </name><name name-style="western"><surname>Caulfield</surname><given-names>T</given-names> </name></person-group><article-title>Privacy concerns with commercial artificial intelligence for healthcare</article-title><year>2021</year><access-date>2026-02-28</access-date><publisher-name>Office of the Privacy Commissioner of Canada</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.ualberta.ca/en/law/media-library/faculty-research/hli/research/docs/privacy-concerns-with-commercial-artificial-intelligence-for-healthcare.pdf">https://www.ualberta.ca/en/law/media-library/faculty-research/hli/research/docs/privacy-concerns-with-commercial-artificial-intelligence-for-healthcare.pdf</ext-link></comment></nlm-citation></ref></ref-list></back></article>