<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v4i1e75527</article-id><article-id pub-id-type="doi">10.2196/75527</article-id><article-categories><subj-group subj-group-type="heading"><subject>Viewpoint</subject></subj-group></article-categories><title-group><article-title>Balancing Innovation and Control: The European Union AI Act in an Era of Global Uncertainty</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Bignami</surname><given-names>Elena Giovanna</given-names></name><degrees>Prof Dr Med, MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Russo</surname><given-names>Michele</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Semeraro</surname><given-names>Federico</given-names></name><degrees>MD, FERC</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Bellini</surname><given-names>Valentina</given-names></name><degrees>Prof Dr Med, MD</degrees><xref ref-type="aff" 
rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Anesthesiology, Critical Care and Pain Medicine Division, Department of Medicine and Surgery, University of Parma</institution><addr-line>Via Gramsci, 14</addr-line><addr-line>Parma</addr-line><country>Italy</country></aff><aff id="aff2"><institution>Department of Anaesthesia, Intensive Care and Prehospital Emergency, Ospedale Maggiore Carlo Alberto Pizzardi</institution><addr-line>Bologna</addr-line><country>Italy</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Malin</surname><given-names>Bradley</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Sellens</surname><given-names>Joan Torrent</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Azai</surname><given-names>Jude</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Gaynor</surname><given-names>Mark</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Elena Giovanna Bignami, Prof Dr Med, MD, Anesthesiology, Critical Care and Pain Medicine Division, Department of Medicine and Surgery, University of Parma, Via Gramsci, 14, Parma, 43121, Italy, 39 0521702111; <email>elenagiovanna.bignami@unipr.it</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>30</day><month>10</month><year>2025</year></pub-date><volume>4</volume><elocation-id>e75527</elocation-id><history><date date-type="received"><day>05</day><month>04</month><year>2025</year></date><date date-type="rev-recd"><day>22</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>25</day><month>08</month><year>2025</year></date></history><copyright-statement>&#x00A9; Elena Giovanna Bignami, Michele Russo, Federico Semeraro, Valentina Bellini. 
Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 30.10.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2025/1/e75527"/><abstract><p>The European Union's Artificial Intelligence Act (EU AI Act), adopted in 2024, establishes a landmark regulatory framework for artificial intelligence (AI) systems, with significant implications for health care. The Act classifies medical AI as "high-risk," imposing stringent requirements for transparency, data governance, and human oversight. While these measures aim to safeguard patient safety, they may also hinder innovation, particularly for smaller health care providers and startups. Concurrently, geopolitical instability&#x2014;marked by rising military expenditures, trade tensions, and supply chain disruptions&#x2014;threatens health care innovation and access. This paper examines the challenges and opportunities posed by the AI Act in health care within a volatile geopolitical landscape. 
It evaluates the intersection of Europe's regulatory approach with competing priorities, including technological sovereignty, ethical AI, and equitable health care, while addressing unintended consequences such as reduced innovation and supply chain vulnerabilities. The study employs a comprehensive review of the EU AI Act's provisions, geopolitical trends, and their implications for health care. It analyzes regulatory documents, stakeholder statements, and case studies to assess compliance burdens, innovation barriers, and geopolitical risks. The paper also synthesizes recommendations from multidisciplinary experts to propose actionable solutions. Key findings include: (1) the AI Act's high-risk classification for medical AI could improve patient safety but risks stifling innovation due to compliance costs (eg, &#x20AC;29,277 annually per AI unit) and certification burdens (&#x20AC;16,800-23,000 per unit); (2) geopolitical factors&#x2014;such as United States-China semiconductor tariffs and EU rearmament&#x2014;exacerbate supply chain vulnerabilities and divert funding from health care innovation; (3) the dominance of "superstar" firms in AI development may marginalize smaller players, further concentrating innovation in well-resourced organizations; and (4) regulatory sandboxes, AI literacy programs, and international collaboration emerge as viable strategies to balance innovation and compliance. The EU AI Act provides a critical framework for ethical AI in health care, but its success depends on mitigating regulatory burdens and geopolitical risks. Proactive measures&#x2014;such as multidisciplinary task forces, resilient supply chains, and human-augmented AI systems&#x2014;are essential to foster innovation while ensuring patient safety. 
Policymakers, clinicians, and technologists must collaborate to navigate these challenges in an era of global uncertainty.</p></abstract><kwd-group><kwd>EU AI Act</kwd><kwd>health care innovation</kwd><kwd>geopolitics</kwd><kwd>trade tariffs</kwd><kwd>regulatory sandboxes</kwd><kwd>AI ethics</kwd><kwd>supply chain resilience</kwd><kwd>European Union Artificial Intelligence</kwd><kwd>Artificial Intelligence ethics</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>We are writing to discuss the implications of the European Union&#x2019;s Artificial Intelligence Act (EU AI Act) on health care, particularly in the context of the current geopolitical climate. The AI Act, which is poised to become a landmark regulation, has significant ramifications for the development, deployment, and governance of AI systems in health care. At the same time, the geopolitical landscape, marked by increasing military expenditures in Europe, the threat of trade tariffs from the United States, and broader global instability, demands that we consider how these regulatory frameworks interact with the realities of a rapidly changing world.</p><p>This paper aims to explore these complex dynamics, highlighting the opportunities and challenges posed by the AI Act in the context of a rapidly evolving geopolitical environment. By examining the intersection of AI regulation, health care innovation, and global instability, we seek to provide a nuanced understanding of how nations can navigate these challenges to ensure that AI technologies are harnessed responsibly and effectively for the benefit of patients and health care systems worldwide.</p></sec><sec id="s2"><title>The EU AI Act and Health care: A Brief Overview</title><p>The EU AI Act, adopted in 2024, represents a comprehensive attempt to regulate AI systems based on their risk levels [<xref ref-type="bibr" rid="ref1">1</xref>]. 
For health care, the Act&#x2019;s classification of AI systems as &#x201C;high-risk&#x201D; is particularly relevant, as it encompasses technologies that directly impact patient care and outcomes. High-risk AI systems in health care include those used in medical devices, patient management, and diagnostic tools. These systems are subject to stringent requirements, including robust risk management, data governance, and human oversight. The Act also mandates transparency, ensuring that health care providers and patients are aware when AI is being used in decision-making processes [<xref ref-type="bibr" rid="ref2">2</xref>].</p><p>However, the Act&#x2019;s focus on risk mitigation raises important questions about its potential impact on innovation. The health care sector is increasingly reliant on AI for tasks ranging from predictive analytics to robotic surgery [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>], and the regulatory burden imposed by the AI Act could slow the pace of technological advancement. For instance, compliance with the Act requires significant investment in technical documentation, quality management systems, and cybersecurity measures [<xref ref-type="bibr" rid="ref5">5</xref>]. While these requirements are essential for ensuring patient safety, they may disproportionately affect smaller health care providers or startups, particularly those developing or using high-risk AI applications such as diagnostic tools or patient management systems. Based on the &#x201C;Study to Support an Impact Assessment of Regulatory Requirements for Artificial Intelligence in Europe&#x201D; by the European Commission [<xref ref-type="bibr" rid="ref6">6</xref>], compliance costs for a single AI unit could reach &#x20AC;29,277 (US $34,153) annually&#x2014;a substantial burden for resource-constrained organizations. 
Startups may face additional challenges due to certification costs (&#x20AC;16,800&#x2010;23,000 per unit, equivalent to US $19,598-26,831 per unit) and the complexity of meeting requirements for human oversight, data governance, and transparency. While the regulation targets only 10% of AI systems as &#x201C;high-risk,&#x201D; health care innovations often fall into this category, potentially stifling innovation or diverting funds from research and development, leading to a concentration of AI development in larger, well-funded organizations, potentially stifling competition and innovation. However, the study notes that existing General Data Protection Regulation compliance (which overlaps with some AI Act requirements) might mitigate costs for some small and medium-sized enterprises, and sector-specific guidance could ease implementation. The lack of qualified notified bodies for certification may further delay market entry for smaller players.</p><p>Moreover, the current structure of the AI innovation ecosystem is marked by the dominance of &#x201C;superstar&#x201D; firms&#x2014;large multinational technology corporations with extensive computational resources, proprietary data access, and deep regulatory expertise [<xref ref-type="bibr" rid="ref7">7</xref>]. These firms are increasingly shaping the trajectory of AI development, including in the biomedical field, often through strategies of destructive creation that consolidate market power and marginalize smaller players. This dynamic has profound implications for health care AI, where dominant firms may prioritize scalable solutions that align with their commercial models rather than localized, need-based innovations. The AI Act&#x2019;s high compliance costs, while necessary for risk mitigation, may unintentionally entrench this asymmetry by privileging those with existing infrastructure for regulatory conformity. 
This raises important questions about market concentration, regulatory capture, and the diminishing space for entrepreneurial and community-based innovation in digital health.</p><p>In addition, digital health tools and AI-based health care solutions possess attributes of nonrival public goods, generating substantial positive externalities and knowledge spillovers. For example, AI algorithms trained on diverse population datasets can yield insights far beyond their initial application, contributing to broader epidemiological surveillance, health equity research, and public health policy. However, the social benefits generated by these tools are not always aligned with the private incentives of developers. This misalignment risks underinvestment in socially valuable innovations&#x2014;particularly those targeting rare diseases, marginalized populations, or preventive health&#x2014;unless appropriate policy mechanisms are established. A nuanced regulatory framework must therefore account not only for risks and compliance but also for the collective value of AI in health care and its role in a broader innovation commons.</p><p>Nevertheless, it is important to acknowledge that several regulatory bodies and major medical technology firms have responded positively to the AI Act, viewing it not as a barrier but as an enabler of innovation and trust [<xref ref-type="bibr" rid="ref8">8</xref>]. Leading manufacturers, including multinational medical technology companies, have started developing internal AI governance frameworks aligned with the Act&#x2019;s provisions, recognizing these as essential for building trust, enhancing market confidence, and maintaining global competitiveness. For instance, some companies have expressed support for classifying AI-based diagnostic tools as high-risk, arguing that clear regulatory requirements reduce legal uncertainty and promote safer, more consistent deployment at scale [<xref ref-type="bibr" rid="ref9">9</xref>]. 
Regulatory authorities such as the European Medicines Agency and notified bodies have also underscored the value of early engagement through structured mechanisms like voluntary premarket consultations and regulatory sandboxes, which can facilitate compliance and shorten time-to-market for trustworthy AI-driven medical technologies [<xref ref-type="bibr" rid="ref10">10</xref>].</p><p>Furthermore, adaptation strategies such as the integration of AI governance frameworks, including AI lifecycle management platforms and automated conformity assessment tools, are being actively adopted by industry leaders to meet the EU AI Act&#x2019;s compliance requirements efficiently [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. These tools facilitate adherence to obligations like risk management, data quality, technical documentation, human oversight, and post-market monitoring. Some hospital systems have also begun building dedicated AI regulatory and ethics teams, often working alongside AI developers and clinical stakeholders to ensure agile yet compliant innovation pipelines, particularly given the classification of many health care AI systems as &#x2019;high-risk&#x2019; under the Act [<xref ref-type="bibr" rid="ref13">13</xref>]. These examples suggest that, while challenging, successful adaptation to the AI Act is feasible&#x2014;especially for organizations that proactively invest in regulatory preparedness and multistakeholder collaboration.</p></sec><sec id="s3"><title>Geopolitical Uncertainty and Its Impact on Health Care</title><p>The geopolitical environment in which the AI Act is being implemented is fraught with uncertainty, and these external pressures could have profound implications for health care innovation. Europe is currently witnessing a significant increase in military expenditures, driven by the ongoing conflict in Ukraine and the broader rearmament of NATO member states. 
This shift in priorities has the potential to divert resources away from health care and other critical social services, including funding for AI research and development. In a context where health care systems are already under strain from rising costs and workforce shortages, this reallocation of resources could further hinder the adoption of AI technologies.</p><p>At the same time, the recent imposition of trade tariffs by the United States on key technology imports, including semiconductors and advanced medical devices, has exacerbated supply chain vulnerabilities. Many of the components essential for AI-driven health care technologies, such as semiconductors and advanced sensors, are produced in a limited number of countries, making supply chains vulnerable to geopolitical disruptions. For example, tensions between the United States and China over trade and technology could lead to export controls, making AI systems less accessible to health care providers in other countries.</p><p>Furthermore, the global competition for AI dominance, often framed as a race between the United States and China, has significant implications for Europe&#x2019;s ability to maintain its technological sovereignty [<xref ref-type="bibr" rid="ref14">14</xref>]. While the AI Act represents a step forward in terms of ethical AI governance, it may also be seen as a barrier to innovation if it is not balanced with sufficient support for research and development. Europe risks falling behind in the global AI race if its regulatory frameworks are perceived as overly restrictive, potentially leading to a &#x201C;brain drain&#x201D; of AI talent to more innovation-friendly regions.</p></sec><sec id="s4"><title>AI, Labor Markets, and Indirect Health Risks</title><p>The transformative potential of generative and autonomous AI systems extends beyond clinical applications and into the broader socioeconomic fabric, including labor markets, which are inextricably linked to public health. 
The automation of diagnostic processes, administrative workflows, and even clinical reasoning threatens to displace certain categories of health care employment. While AI can augment clinical tasks and reduce burden, its large-scale deployment without social protections may contribute to job insecurity, professional deskilling, and psychological stress among health care workers [<xref ref-type="bibr" rid="ref15">15</xref>]. Drawing from the work of Case and Deaton on &#x201C;deaths of despair&#x201D; [<xref ref-type="bibr" rid="ref16">16</xref>], we recognize that economic precarity and disconnection from meaningful work are major determinants of population health. Policymakers must anticipate these secondary effects when promoting AI adoption and consider protective strategies such as retraining programs, mental health support, and frameworks for meaningful human-machine collaboration in clinical settings.</p></sec><sec id="s5"><title>The Intersection of AI Regulation and Geopolitical Realities</title><p>The intersection of AI regulation and geopolitical realities presents both challenges and opportunities for health care. On the one hand, the AI Act provides a framework for ensuring that AI systems in health care are safe, transparent, and accountable. This is particularly important in a field where decisions can have life-or-death consequences. On the other hand, the Act must be implemented in a way that does not hinder the ability of European health care providers to compete globally or to respond to emerging threats, whether they be pandemics, cyberattacks, or the consequences of geopolitical instability.</p><p>One area where the AI Act could have a particularly significant impact is in the development of AI-driven diagnostic tools. These tools, which have the potential to revolutionize health care by enabling earlier and more accurate diagnoses, are classified as high-risk under the Act [<xref ref-type="bibr" rid="ref17">17</xref>]. 
While this classification is understandable given the potential consequences of diagnostic errors, it also raises questions about how to balance regulation with innovation. For example, how can we ensure that the regulatory burden does not discourage the development of AI tools that could improve access to health care in underserved areas? This is particularly relevant in the context of global health disparities, where AI-driven diagnostics could play a crucial role in bridging gaps in health care access.</p><p>Additionally, the geopolitical context adds another layer of complexity to the implementation of the AI Act. For instance, the increasing use of AI in military applications, such as autonomous drones and cyber warfare, raises ethical questions about the dual-use potential of AI technologies. While the AI Act focuses on civilian applications, the broader geopolitical environment may influence how these technologies are developed and deployed. Europe must navigate these challenges carefully to ensure that its regulatory frameworks do not inadvertently weaken its position in the global AI landscape.</p><p>Another dimension often overlooked in discussions of AI governance is the environmental impact of AI development and its intersection with health. High-performance computing, data storage, and large-scale model training required for cutting-edge health AI systems consume significant energy and contribute to carbon emissions. These environmental costs carry indirect health consequences, particularly for vulnerable populations already burdened by climate-related disease and resource scarcity. In this light, responsible AI innovation in health care must also be environmentally sustainable [<xref ref-type="bibr" rid="ref18">18</xref>]. 
Future iterations of the AI Act and related frameworks could benefit from incorporating sustainability criteria&#x2014;such as lifecycle assessments, emissions reporting, or green AI standards&#x2014;to ensure that health innovation does not inadvertently generate new public health risks through environmental degradation.</p></sec><sec id="s6"><title>A Call for Reflection and Action</title><p>In light of these challenges, we urge the scientific health care community to take a proactive role in addressing the intersection of AI regulation, health care, and geopolitical uncertainty. The rapid evolution of AI technologies, coupled with increasing global complexity, demands a coordinated and realistic response. While some solutions may face implementation challenges, they can serve as guiding principles or pilot initiatives adaptable to local conditions. To navigate this landscape effectively, we propose the following revised and context-aware actions.</p><sec id="s6-1"><title>Develop Context-Aware Multidisciplinary Task Forces</title><p>While political and institutional fragmentation across the EU poses challenges, the creation of multidisciplinary task forces&#x2014;initially on a national or regional level&#x2014;can serve as a pragmatic starting point. These groups, comprising AI experts, clinicians, ethicists, and legal scholars, can work to identify actionable bottlenecks in the implementation of the AI Act. Rather than replacing the EU&#x2019;s legislative role, such task forces would serve in an advisory capacity, promoting dialogue between practitioners and regulators. Initiatives like the European AI Alliance show that inclusive, multistakeholder discussion is possible despite complex politics [<xref ref-type="bibr" rid="ref19">19</xref>]. One promising approach is the creation of regulatory sandboxes&#x2014;controlled testing environments where AI technologies can be piloted under temporary regulatory flexibility, with close oversight from authorities. 
For instance, a hospital could trial an AI diagnostic tool under monitored conditions, with safeguards like mandatory human oversight, real-time bias audits, and strict patient consent protocols. Such sandboxes, already tested in sectors like finance (eg, the UK&#x2019;s Financial Conduct Authority&#x2019;s sandbox for fintech) and digital health (eg, the Food and Drug Administration&#x2019;s precertification program), could accelerate compliance while mitigating risks [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. By adapting these models to health care, task forces could help shape sandbox frameworks that align with the AI Act&#x2019;s high-risk requirements&#x2014;ensuring innovation thrives without compromising safety or ethical standards.</p></sec><sec id="s6-2"><title>Invest in AI Literacy and Training</title><p>To equip health care professionals with the knowledge and skills needed to understand and work alongside AI systems, investment in AI literacy could be beneficial. This includes training on the ethical use of AI, data governance, and cybersecurity, ensuring that health care providers can confidently integrate AI into their practices while adhering to regulatory requirements [<xref ref-type="bibr" rid="ref22">22</xref>]. Training programs should also address the geopolitical implications of AI, helping health care professionals understand how global trends may impact their work. Given the linguistic and cultural diversity across Europe, a fully centralized training system is indeed unrealistic. Instead, we advocate for a federated approach by developing modular training curricula that can be localized and adapted by individual Member States or health care institutions. EU agencies such as the European Health and Digital Executive Agency [<xref ref-type="bibr" rid="ref23">23</xref>] can support these initiatives by setting minimum competency standards and offering open-access multilingual materials. 
This approach balances regional autonomy with EU-wide quality assurance and can be complemented by peer-exchange platforms and international collaboration.</p></sec><sec id="s6-3"><title>Advocate for Geopolitically Resilient Supply Chains</title><p>It includes working with policymakers and industry leaders to create resilient supply chains for medical devices and AI technologies. This includes diversifying suppliers, investing in local manufacturing capabilities, and developing contingency plans to mitigate the impact of trade tariffs or geopolitical disruptions. However, we recognize that building a completely geopolitically resilient supply chain is not immediately feasible. Instead, we propose diversification and strategic redundancy as realistic steps. This includes identifying critical dependencies (eg, on semiconductors), incentivizing dual-sourcing contracts, and fostering EU-based innovation hubs, such as the European Chips Act initiative [<xref ref-type="bibr" rid="ref24">24</xref>]. While resilience cannot eliminate risk, it can reduce vulnerability to single points of failure, particularly in high-risk sectors like health care.</p></sec><sec id="s6-4"><title>Foster International Collaboration Without Sacrificing Sovereignty</title><p>Despite growing techno-nationalism, there remains scope for international collaboration on standards and ethics&#x2014;especially in health, where global challenges demand collective solutions. Forums such as the World Health Organization, Organisation for Economic Co-operation and Development, and G7/G20 AI initiatives offer platforms for aligning regulatory principles without ceding national control [<xref ref-type="bibr" rid="ref25">25</xref>-<xref ref-type="bibr" rid="ref27">27</xref>]. 
Shared frameworks can lower regulatory friction and support equitable access to AI-driven health care in lower-resource settings.</p></sec><sec id="s6-5"><title>Implement Human-Augmented AI Systems</title><p>Adopting a &#x201C;human-in-the-loop&#x201D; approach where AI-generated responses are verified by vetted peer clinician consultants might be beneficial. This approach combines the efficiency of AI with the trust and expertise of human clinicians, ensuring more accurate and reliable decision support. The system can be scaled from local institutions to global platforms, with various incentives for peer consultants, such as recognition, continuing medical education credits, or monetary compensation [<xref ref-type="bibr" rid="ref28">28</xref>]. Of course, it is not always practical or necessary to require human oversight for every AI output. However, for high-stakes clinical applications (eg, diagnosis, triage, treatment decisions), human-in-the-loop systems offer a safeguard against automation bias and unexpected model behavior [<xref ref-type="bibr" rid="ref29">29</xref>]. We suggest developing risk-tiered guidelines, where human oversight is proportionate to potential patient harm. Enforcement can occur through compliance mechanisms tied to medical device regulation and certification (eg, CE marking), mirroring how pharmacovigilance currently mandates oversight for novel therapies [<xref ref-type="bibr" rid="ref30">30</xref>].</p></sec><sec id="s6-6"><title>Prioritize Ethical AI Development</title><p>Establishment of clear ethical guidelines for the use of AI in health care, particularly in high-stakes scenarios such as triage during conflicts or pandemics, can further development. These guidelines should address issues such as bias, transparency, and accountability, ensuring that AI systems are used in ways that prioritize patient welfare and uphold fundamental rights. 
Ethical considerations should also extend to the geopolitical implications of AI, such as the potential for AI technologies to be used in ways that exacerbate global inequalities. Regulating ethical AI development in private industry is undeniably challenging, especially without global harmonization. However, governments and institutions can shape behavior through a mix of regulation, incentives, and transparency requirements. This includes public reporting of algorithmic impact assessments, bias audits, and governance structures&#x2014;similar to financial disclosures. Procurement policies can also serve as a powerful lever: health care systems can favor vendors that demonstrate ethical compliance, much like environmental or labor standards in other sectors [<xref ref-type="bibr" rid="ref31">31</xref>]. Furthermore, ethical guidelines co-developed with industry stakeholders can foster shared ownership and practical relevance.</p><p>By framing these actions not as universal solutions but as scalable, adaptable strategies, we believe the scientific health care community can meaningfully engage with both the risks and promises of AI. Realistic implementation pathways grounded in the current political, linguistic, and economic landscape are essential for ensuring that AI technologies contribute to safe, equitable, and resilient health care systems across Europe and beyond. (<xref ref-type="fig" rid="figure1">Figure 1</xref>)</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>AI-generated image representing the need for a balanced approach to AI regulation that safeguards patient safety while fostering innovation, even in the face of a rapidly changing geopolitical landscape. 
AI: artificial intelligence.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v4i1e75527_fig01.png"/></fig></sec></sec><sec id="s7" sec-type="conclusions"><title>Conclusion</title><p>The EU AI Act offers a vital framework for ensuring the safe and ethical use of AI in health care, but its effectiveness depends on balanced implementation amid geopolitical uncertainty. To avoid stifling innovation, regulatory efforts must be paired with support for adaptation, investment in AI literacy, and resilient infrastructure. To maximize the benefits of the EU AI Act while mitigating its risks, a broadened perspective is needed&#x2014;one that recognizes not only direct clinical applications but also the wider socio-economic and environmental contexts in which health care AI operates. This includes addressing market concentration in AI research and development, aligning private innovation with public health needs, anticipating labor market disruptions, and reducing the ecological footprint of digital health. 
By integrating these factors into future policy and implementation strategies, Europe can foster an AI ecosystem that is not only safe and innovative, but also equitable, sustainable, and socially responsible.</p><p>We urge policymakers, health care leaders, and industry to collaborate on practical solutions, such as regulatory sandboxes, ethical oversight, and international alignment to ensure that AI advances benefit patients while reinforcing Europe&#x2019;s role in global health innovation.</p></sec></body><back><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">EU AI</term><def><p>European Union's Artificial Intelligence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>Regulation of the European Parliament and of the Council on Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act)</article-title><source>European Commission</source><access-date>2025-03-12</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:52021PC0206">https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:52021PC0206</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bignami</surname><given-names>E</given-names> </name><name name-style="western"><surname>Russo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lanza</surname><given-names>R</given-names> </name><name name-style="western"><surname>Bellini</surname><given-names>V</given-names> </name></person-group><article-title>Navigating the integration of large language models in healthcare: challenges, 
opportunities, and implications under the EU AI Act</article-title><source>J Anesth Analg Crit Care</source><year>2024</year><month>12</month><day>2</day><volume>4</volume><issue>1</issue><fpage>79</fpage><pub-id pub-id-type="doi">10.1186/s44158-024-00215-w</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Knudsen</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Ghaffar</surname><given-names>U</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hung</surname><given-names>AJ</given-names> </name></person-group><article-title>Clinical applications of artificial intelligence in robotic surgery</article-title><source>J Robotic Surg</source><year>2024</year><volume>18</volume><issue>1</issue><fpage>102</fpage><pub-id pub-id-type="doi">10.1007/s11701-024-01867-0</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dixon</surname><given-names>D</given-names> </name><name name-style="western"><surname>Sattar</surname><given-names>H</given-names> </name><name name-style="western"><surname>Moros</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Unveiling the influence of ai predictive analytics on patient outcomes: a comprehensive narrative review</article-title><source>Cureus</source><year>2024</year><month>05</month><volume>16</volume><issue>5</issue><fpage>e59954</fpage><pub-id pub-id-type="doi">10.7759/cureus.59954</pub-id><pub-id pub-id-type="medline">38854327</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Williams</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Woodward</surname><given-names>AJ</given-names> </name></person-group><article-title>Cybersecurity vulnerabilities in medical devices: a complex environment and multifaceted problem</article-title><source>Med Devices (Auckl)</source><year>2015</year><volume>8</volume><fpage>305</fpage><lpage>316</lpage><pub-id pub-id-type="doi">10.2147/MDER.S50048</pub-id><pub-id pub-id-type="medline">26229513</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Renda</surname><given-names>A</given-names> </name><name name-style="western"><surname>Arroyo</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fanni</surname><given-names>R</given-names> </name><etal/></person-group><source>Study to support an impact assessment of regulatory requirements for artificial intelligence in Europe</source><year>2021</year><publisher-name>Publications Office of the European Union</publisher-name><pub-id pub-id-type="doi">10.2759/523404</pub-id><pub-id pub-id-type="other">978-92-76-36220-3</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Filippucci</surname><given-names>F</given-names> </name><name name-style="western"><surname>Gal</surname><given-names>P</given-names> </name><name name-style="western"><surname>Jona-Lasinio</surname><given-names>C</given-names> </name><name name-style="western"><surname>Leandro</surname><given-names>A</given-names> </name><name name-style="western"><surname>Nicoletti</surname><given-names>G</given-names> </name></person-group><article-title>The impact of artificial intelligence on productivity, distribution and growth: key mechanisms, initial evidence and policy 
challenges</article-title><source>OECD Artificial Intelligence Papers</source><year>2024</year><issue>15</issue><pub-id pub-id-type="doi">10.1787/8d900037-en</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="web"><article-title>Polarion vs responsible AI</article-title><source>Siemens</source><year>2024</year><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://blogs.sw.siemens.com/polarion/2024-polarion-vs-responsible-ai/">https://blogs.sw.siemens.com/polarion/2024-polarion-vs-responsible-ai/</ext-link></comment></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="web"><source>Stakeholder joint statement on access to innovative healthcare under the Artificial Intelligence Act (AI Act)</source><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.medtecheurope.org/wp-content/uploads/2023/06/230619-ai-act-letter-with-healthcare-stakeholders_final.pdf">https://www.medtecheurope.org/wp-content/uploads/2023/06/230619-ai-act-letter-with-healthcare-stakeholders_final.pdf</ext-link></comment></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="web"><article-title>EMA regulatory science to 2025 strategic reflection</article-title><source>European Medicines Agency</source><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ema.europa.eu/en/documents/regulatory-procedural-guideline/ema-regulatory-science-2025-strategic-reflection_en.pdf">https://www.ema.europa.eu/en/documents/regulatory-procedural-guideline/ema-regulatory-science-2025-strategic-reflection_en.pdf</ext-link></comment></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="web"><article-title>Accelerate responsible AI adoption</article-title><source>onetrust</source><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.onetrust.com/solutions/ai-governance/">https://www.onetrust.com/solutions/ai-governance/</ext-link></comment></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="web"><article-title>AI governance platform for responsible and trustworthy AI</article-title><source>Modulos</source><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.modulos.ai/modulos-ai-governance-platform/">https://www.modulos.ai/modulos-ai-governance-platform/</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="web"><source>The Topol Review: preparing the healthcare workforce to deliver the digital future</source><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://topol.hee.nhs.uk/">https://topol.hee.nhs.uk/</ext-link></comment></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Briganti Dini</surname><given-names>G</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Costa</surname><given-names>O</given-names> </name><name name-style="western"><surname>Lecha</surname><given-names>E</given-names> </name><name name-style="western"><surname>Vlaskamp</surname><given-names>MC</given-names> </name></person-group><article-title>The EU&#x2019;s response to the fragmented emergence of artificial intelligence</article-title><source>EU Foreign Policy in a Fragmenting International Order The European Union in International Affairs</source><year>2025</year><publisher-name>Palgrave Macmillan</publisher-name><pub-id pub-id-type="doi">10.1007/978-3-031-64060-5_8</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Pavuluri</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sangal</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sather</surname><given-names>J</given-names> </name><name name-style="western"><surname>Taylor</surname><given-names>RA</given-names> </name></person-group><article-title>Balancing act: the complex role of artificial intelligence in addressing burnout and healthcare workforce dynamics</article-title><source>BMJ Health Care Inform</source><year>2024</year><month>08</month><day>24</day><volume>31</volume><issue>1</issue><fpage>e101120</fpage><pub-id pub-id-type="doi">10.1136/bmjhci-2024-101120</pub-id><pub-id pub-id-type="medline">39181545</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Case</surname><given-names>A</given-names> </name><name name-style="western"><surname>Deaton</surname><given-names>A</given-names> </name></person-group><source>Deaths of Despair and the Future of Capitalism</source><year>2021</year><publisher-name>Princeton University Press</publisher-name><fpage>1</fpage><lpage>336</lpage><pub-id pub-id-type="doi">10.2307/j.ctvpr7rb2</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Kolfschooten</surname><given-names>H</given-names> </name><name name-style="western"><surname>van Oirschot</surname><given-names>J</given-names> </name></person-group><article-title>The EU Artificial Intelligence Act (2024): implications for healthcare</article-title><source>Health Policy</source><year>2024</year><month>11</month><volume>149</volume><fpage>105152</fpage><pub-id pub-id-type="doi">10.1016/j.healthpol.2024.105152</pub-id><pub-id pub-id-type="medline">39244818</pub-id></nlm-citation></ref><ref 
id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Berger</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ehlers</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Nitsche</surname><given-names>J</given-names> </name></person-group><article-title>Aligning with the goals of the planetary health concept regarding ecological sustainability and digital health: scoping review</article-title><source>J Med Internet Res</source><year>2025</year><month>05</month><day>28</day><volume>27</volume><fpage>e71795</fpage><pub-id pub-id-type="doi">10.2196/71795</pub-id><pub-id pub-id-type="medline">40435494</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><article-title>The European AI Alliance</article-title><source>European Commission</source><access-date>2025-06-21</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://digital-strategy.ec.europa.eu/en/policies/european-ai-alliance">https://digital-strategy.ec.europa.eu/en/policies/european-ai-alliance</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cornelli</surname><given-names>G</given-names> </name><name name-style="western"><surname>Doerr</surname><given-names>S</given-names> </name><name name-style="western"><surname>Gambacorta</surname><given-names>L</given-names> </name><name name-style="western"><surname>Merrouche</surname><given-names>O</given-names> </name></person-group><article-title>Regulatory sandboxes and fintech funding: evidence from the UK</article-title><source>Rev Financ</source><year>2024</year><month>01</month><day>16</day><volume>28</volume><issue>1</issue><fpage>203</fpage><lpage>233</lpage><pub-id 
pub-id-type="doi">10.1093/rof/rfad017</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alon</surname><given-names>N</given-names> </name><name name-style="western"><surname>Stern</surname><given-names>AD</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name></person-group><article-title>Assessing the Food and Drug Administration&#x2019;s risk-based framework for software precertification with top health apps in the United States: quality improvement study</article-title><source>JMIR Mhealth Uhealth</source><year>2020</year><month>10</month><day>26</day><volume>8</volume><issue>10</issue><fpage>e20482</fpage><pub-id pub-id-type="doi">10.2196/20482</pub-id><pub-id pub-id-type="medline">32927429</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bignami</surname><given-names>EG</given-names> </name><name name-style="western"><surname>Russo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Semeraro</surname><given-names>F</given-names> </name><name name-style="western"><surname>Bellini</surname><given-names>V</given-names> </name></person-group><article-title>Technoethics in real life: AI as a core clinical competency</article-title><source>J Anesth Analg Crit Care</source><year>2025</year><month>03</month><day>1</day><volume>5</volume><issue>1</issue><fpage>13</fpage><pub-id pub-id-type="doi">10.1186/s44158-025-00233-2</pub-id><pub-id pub-id-type="medline">40025619</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Katehakis</surname><given-names>DG</given-names> </name><name 
name-style="western"><surname>Filippidis</surname><given-names>D</given-names> </name><name name-style="western"><surname>Karamanis</surname><given-names>K</given-names> </name><etal/></person-group><article-title>The smartHEALTH European Digital Innovation Hub experiences and challenges for accelerating the transformation of public and private organizations within the innovation ecosystem</article-title><source>Front Med (Lausanne)</source><year>2024</year><volume>11</volume><fpage>1503235</fpage><pub-id pub-id-type="doi">10.3389/fmed.2024.1503235</pub-id><pub-id pub-id-type="medline">39678035</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><article-title>European Chips Act: the Chips for Europe Initiative</article-title><source>European Commission</source><access-date>2025-06-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://digital-strategy.ec.europa.eu/en/factpages/european-chips-act-chips-europe-initiative">https://digital-strategy.ec.europa.eu/en/factpages/european-chips-act-chips-europe-initiative</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="web"><article-title>Global Initiative on AI for Health</article-title><source>World Health Organization</source><access-date>2025-06-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/initiatives/global-initiative-on-ai-for-health">https://www.who.int/initiatives/global-initiative-on-ai-for-health</ext-link></comment></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="web"><article-title>AI principles</article-title><source>OECD</source><access-date>2025-06-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.oecd.org/en/topics/sub-issues/ai-principles.html">https://www.oecd.org/en/topics/sub-issues/ai-principles.html</ext-link></comment></nlm-citation></ref><ref 
id="ref27"><label>27</label><nlm-citation citation-type="web"><article-title>G7 consensus reached on advancing AI for sustainable development</article-title><source>UNDP</source><access-date>2025-06-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.undp.org/news/g7-consensus-reached-advancing-ai-sustainable-development">https://www.undp.org/news/g7-consensus-reached-advancing-ai-sustainable-development</ext-link></comment></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Minehart</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Stefanski</surname><given-names>SE</given-names> </name></person-group><article-title>Artificial intelligence supporting anesthesiology clinical decision-making</article-title><source>Anesth Analg</source><year>2025</year><month>09</month><day>1</day><volume>141</volume><issue>3</issue><fpage>536</fpage><lpage>539</lpage><pub-id pub-id-type="doi">10.1213/ANE.0000000000007473</pub-id><pub-id pub-id-type="medline">40080433</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>McKay</surname><given-names>MH</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Degen</surname><given-names>H</given-names> </name><name name-style="western"><surname>Stavroula</surname><given-names>N</given-names> </name></person-group><article-title>Realizing the promise of AI governance involving humans-in-the-loop</article-title><source>HCI International 2024 &#x2013; Late Breaking Papers</source><year>2024</year><publisher-name>Springer Cham</publisher-name><series>Lecture Notes in Computer Science</series><pub-id pub-id-type="doi">10.1007/978-3-031-76827-9_7</pub-id></nlm-citation></ref><ref 
id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dimitsaki</surname><given-names>S</given-names> </name><name name-style="western"><surname>Natsiavas</surname><given-names>P</given-names> </name><name name-style="western"><surname>Jaulent</surname><given-names>MC</given-names> </name></person-group><article-title>Applying AI to structured real-world data for pharmacovigilance purposes: scoping review</article-title><source>J Med Internet Res</source><year>2024</year><month>12</month><day>30</day><volume>26</volume><fpage>e57824</fpage><pub-id pub-id-type="doi">10.2196/57824</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Singla</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Namboodri</surname><given-names>T</given-names> </name></person-group><article-title>Globalization and international issues in sustainable manufacturing</article-title><source>Sustainability in Smart Manufacturing</source><year>2024</year><publisher-name>CRC Press</publisher-name><fpage>1</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.1201/9781003467496-1</pub-id></nlm-citation></ref></ref-list></back></article>