@Article{info:doi/10.2196/68661, author="Ruotsalainen, Pekka and Blobel, Bernd", title="A System Model and Requirements for Transformation to Human-Centric Digital Health", journal="J Med Internet Res", year="2025", month="Apr", day="28", volume="27", pages="e68661", keywords="digital health", keywords="human rights", keywords="privacy", keywords="dignity", keywords="autonomy", keywords="digital economy", keywords="neoliberalism", keywords="modeling", keywords="system analysis", keywords="artificial intelligence", doi="10.2196/68661", url="https://www.jmir.org/2025/1/e68661" } @Article{info:doi/10.2196/70789, author="Chen, Jun and Liu, Yu and Liu, Peng and Zhao, Yiming and Zuo, Yan and Duan, Hui", title="Adoption of Large Language Model AI Tools in Everyday Tasks: Multisite Cross-Sectional Qualitative Study of Chinese Hospital Administrators", journal="J Med Internet Res", year="2025", month="Apr", day="1", volume="27", pages="e70789", keywords="large language model", keywords="artificial intelligence", keywords="health care administration", keywords="technology adoption", keywords="hospital administrator", keywords="qualitative study", keywords="barriers to adoption", abstract="Background: Large language model (LLM) artificial intelligence (AI) tools have the potential to streamline health care administration by enhancing efficiency in document drafting, resource allocation, and communication tasks. Despite this potential, the adoption of such tools among hospital administrators remains understudied, particularly at the individual level. Objective: This study aims to explore factors influencing the adoption and use of LLM AI tools among hospital administrators in China, focusing on enablers, barriers, and practical applications in daily administrative tasks. Methods: A multicenter, cross-sectional, descriptive qualitative design was used. Data were collected through semistructured face-to-face interviews with 31 hospital administrators across 3 tertiary hospitals in Beijing, Shenzhen, and Chengdu from June 2024 to August 2024. The Colaizzi method was used for thematic analysis to identify patterns in participants' experiences and perspectives. Results: Adoption of LLM AI tools was generally low, with significant site-specific variations. Participants with higher technological familiarity and positive early experiences reported more frequent use, while barriers such as mistrust in tool accuracy, limited prompting skills, and insufficient training hindered broader adoption. Tools were primarily used for document drafting, with limited exploration of advanced functionalities. Participants strongly emphasized the need for structured training programs and institutional support to enhance usability and confidence. Conclusions: Familiarity with technology, positive early experiences, and openness to innovation may facilitate adoption, while barriers such as limited knowledge, mistrust in tool accuracy, and insufficient prompting skills can hinder broader use. LLM AI tools are now primarily used for basic tasks such as document drafting, with limited application to more advanced functionalities due to a lack of training and confidence. Structured tutorials and institutional support are needed to enhance usability and integration. Targeted training programs, combined with organizational strategies to build trust and improve accessibility, could enhance adoption rates and broaden tool use. Future quantitative investigations should validate the adoption rate and influencing factors. 
", doi="10.2196/70789", url="https://www.jmir.org/2025/1/e70789" } @Article{info:doi/10.2196/65567, author="Kauttonen, Janne and Rousi, Rebekah and Alam{\"a}ki, Ari", title="Trust and Acceptance Challenges in the Adoption of AI Applications in Health Care: Quantitative Survey Analysis", journal="J Med Internet Res", year="2025", month="Mar", day="21", volume="27", pages="e65567", keywords="artificial intelligence", keywords="AI", keywords="health care technology", keywords="technology adoption", keywords="predictive modeling", keywords="user trust", keywords="user acceptance", abstract="Background: Artificial intelligence (AI) has potential to transform health care, but its successful implementation depends on the trust and acceptance of consumers and patients. Understanding the factors that influence attitudes toward AI is crucial for effective adoption. Despite AI's growing integration into health care, consumer and patient acceptance remains a critical challenge. Research has largely focused on applications or attitudes, lacking a comprehensive analysis of how factors, such as demographics, personality traits, technology attitudes, and AI knowledge, affect and interact across different health care AI contexts. Objective: We aimed to investigate people's trust in and acceptance of AI across health care use cases and determine how context and perceived risk affect individuals' propensity to trust and accept AI in specific health care scenarios. Methods: We collected and analyzed web-based survey data from 1100 Finnish participants, presenting them with 8 AI use cases in health care: 5 (62\%) noninvasive applications (eg, activity monitoring and mental health support) and 3 (38\%) physical interventions (eg, AI-controlled robotic surgery). Respondents evaluated intention to use, trust, and willingness to trade off personal data for these use cases. Gradient boosted tree regression models were trained to predict responses based on 33 demographic-, personality-, and technology-related variables. To interpret the results of our predictive models, we used the Shapley additive explanations method, a game theory--based approach for explaining the output of machine learning models. It quantifies the contribution of each feature to individual predictions, allowing us to determine the relative importance of various demographic-, personality-, and technology-related factors and their interactions in shaping participants' trust in and acceptance of AI in health care. Results: Consumer attitudes toward technology, technology use, and personality traits were the primary drivers of trust and intention to use AI in health care. Use cases were ranked by acceptance, with noninvasive monitors being the most preferred. However, the specific use case had less impact in general than expected. Nonlinear dependencies were observed, including an inverted U-shaped pattern in positivity toward AI based on self-reported AI knowledge. Certain personality traits, such as being more disorganized and careless, were associated with more positive attitudes toward AI in health care. Women seemed more cautious about AI applications in health care than men. Conclusions: The findings highlight the complex interplay of factors influencing trust and acceptance of AI in health care. Consumer trust and intention to use AI in health care are driven by technology attitudes and use rather than specific use cases. 
AI service providers should consider demographic factors, personality traits, and technology attitudes when designing and implementing AI systems in health care. The study demonstrates the potential of using predictive AI models as decision-making tools when implementing health care AI applications and interacting with clients. ", doi="10.2196/65567", url="https://www.jmir.org/2025/1/e65567" } @Article{info:doi/10.2196/53892, author="Cabral, Pereira Bernardo and Braga, Maciel Luiza Amara and Conte Filho, Gilbert Carlos and Penteado, Bruno and Freire de Castro Silva, Luis Sandro and Castro, Leonardo and Fornazin, Marcelo and Mota, Fabio", title="Future Use of AI in Diagnostic Medicine: 2-Wave Cross-Sectional Survey Study", journal="J Med Internet Res", year="2025", month="Feb", day="27", volume="27", pages="e53892", keywords="artificial intelligence", keywords="AI", keywords="diagnostic medicine", keywords="survey research", keywords="researcher opinion", keywords="future", abstract="Background: The rapid evolution of artificial intelligence (AI) presents transformative potential for diagnostic medicine, offering opportunities to enhance diagnostic accuracy, reduce costs, and improve patient outcomes. Objective: This study aimed to assess the expected future impact of AI on diagnostic medicine by comparing global researchers' expectations using 2 cross-sectional surveys. Methods: The surveys were conducted in September 2020 and February 2023. Each survey captured a 10-year projection horizon, gathering insights from >3700 researchers with expertise in AI and diagnostic medicine from all over the world. The survey sought to understand the perceived benefits, integration challenges, and evolving attitudes toward AI use in diagnostic settings. Results: Results indicated a strong expectation among researchers that AI will substantially influence diagnostic medicine within the next decade. Key anticipated benefits include enhanced diagnostic reliability, reduced screening costs, improved patient care, and decreased physician workload, addressing the growing demand for diagnostic services, which outpaces the supply of medical professionals. Specifically, x-ray diagnosis, heart rhythm interpretation, and skin malignancy detection were identified as the diagnostic tools most likely to be integrated with AI technologies due to their maturity and existing AI applications. The surveys highlighted the growing optimism regarding AI's ability to transform traditional diagnostic pathways and enhance clinical decision-making processes. Furthermore, the study identified barriers to the integration of AI in diagnostic medicine. The primary challenges cited were the difficulties of embedding AI within existing clinical workflows, ethical and regulatory concerns, and data privacy issues. Respondents emphasized uncertainties around legal responsibility and accountability for AI-supported clinical decisions, data protection challenges, and the need for robust regulatory frameworks to ensure safe AI deployment. Ethical concerns, particularly those related to algorithmic transparency and bias, were noted as increasingly critical, reflecting a heightened awareness of the potential risks associated with AI adoption in clinical settings. Differences between the 2 survey waves indicated a growing focus on ethical and regulatory issues, suggesting an evolving recognition of these challenges over time. 
Conclusions: Despite these barriers, there was notable consistency in researchers' expectations across the 2 survey periods, indicating a stable and sustained outlook on AI's transformative potential in diagnostic medicine. The findings show the need for interdisciplinary collaboration among clinicians, AI developers, and regulators to address ethical and practical challenges while maximizing AI's benefits. This study offers insights into the projected trajectory of AI in diagnostic medicine, guiding stakeholders, including health care providers, policy makers, and technology developers, on navigating the opportunities and challenges of AI integration. ", doi="10.2196/53892", url="https://www.jmir.org/2025/1/e53892", url="http://www.ncbi.nlm.nih.gov/pubmed/40053779" } @Article{info:doi/10.2196/68347, author="Hadar-Shoval, Dorit and Lvovsky, Maya and Asraf, Kfir and Shimoni, Yoav and Elyoseph, Zohar", title="The Feasibility of Large Language Models in Verbal Comprehension Assessment: Mixed Methods Feasibility Study", journal="JMIR Form Res", year="2025", month="Feb", day="24", volume="9", pages="e68347", keywords="large language models", keywords="verbal comprehension assessment", keywords="artificial intelligence", keywords="AI in psychodiagnostics", keywords="personalized intelligence tests", keywords="verbal comprehension index", keywords="Wechsler Adult Intelligence Scale", keywords="WAIS-III", keywords="psychological test validity", keywords="ethics in computerized cognitive assessment", abstract="Background: Cognitive assessment is an important component of applied psychology, but limited access and high costs make these evaluations challenging. Objective: This study aimed to examine the feasibility of using large language models (LLMs) to create personalized artificial intelligence--based verbal comprehension tests (AI-BVCTs) for assessing verbal intelligence, in contrast with traditional assessment methods based on standardized norms. Methods: We used a within-participants design, comparing scores obtained from AI-BVCTs with those from the Wechsler Adult Intelligence Scale (WAIS-III) verbal comprehension index (VCI). In total, 8 Hebrew-speaking participants completed both the VCI and AI-BVCT, the latter being generated using the LLM Claude. Results: The concordance correlation coefficient (CCC) demonstrated strong agreement between AI-BVCT and VCI scores (Claude: CCC=.75, 90\% CI 0.266-0.933; GPT-4: CCC=.73, 90\% CI 0.170-0.935). Pearson correlations further supported these findings, showing strong associations between VCI and AI-BVCT scores (Claude: r=.84, P<.001; GPT-4: r=.77, P=.02). No statistically significant differences were found between AI-BVCT and VCI scores (P>.05). Conclusions: These findings support the potential of LLMs to assess verbal intelligence. The study attests to the promise of AI-based cognitive tests in increasing the accessibility and affordability of assessment processes, enabling personalized testing. The research also raises ethical concerns regarding privacy and overreliance on AI in clinical work. Further research with larger and more diverse samples is needed to establish the validity and reliability of this approach and develop more accurate scoring procedures. ", doi="10.2196/68347", url="https://formative.jmir.org/2025/1/e68347" } @Article{info:doi/10.2196/50708, author="Rinderknecht, Fatuma-Ayaan and Yang, B. Vivian and Tilahun, Mekaleya and Lester, C. 
Jenna", title="Perspectives of Black, Latinx, Indigenous, and Asian Communities on Health Data Use and AI: Cross-Sectional Survey Study", journal="J Med Internet Res", year="2025", month="Feb", day="21", volume="27", pages="e50708", keywords="augmented intelligence", keywords="artificial intelligence", keywords="health equity", keywords="dermatology", keywords="Black", keywords="Latinx", keywords="Indigenous", keywords="Asian", keywords="racial and ethnic minority communities", keywords="AI", keywords="health care", keywords="health data", keywords="survey", keywords="racism", keywords="large language model", keywords="LLM", keywords="diversity", doi="10.2196/50708", url="https://www.jmir.org/2025/1/e50708" } @Article{info:doi/10.2196/65565, author="Owoyemi, Ayomide and Osuchukwu, Joanne and Salwei, E. Megan and Boyd, Andrew", title="Checklist Approach to Developing and Implementing AI in Clinical Settings: Instrument Development Study", journal="JMIRx Med", year="2025", month="Feb", day="20", volume="6", pages="e65565", keywords="artificial intelligence", keywords="machine learning", keywords="algorithm", keywords="model", keywords="analytics", keywords="AI deployment", keywords="human-AI interaction", keywords="AI integration", keywords="checklist", keywords="clinical workflow", keywords="clinical setting", keywords="literature review", abstract="Background: The integration of artificial intelligence (AI) in health care settings demands a nuanced approach that considers both technical performance and sociotechnical factors. Objective: This study aimed to develop a checklist that addresses the sociotechnical aspects of AI deployment in health care and provides a structured, holistic guide for teams involved in the life cycle of AI systems. Methods: A literature synthesis identified 20 relevant studies, forming the foundation for the Clinical AI Sociotechnical Framework checklist. A modified Delphi study was then conducted with 35 global health care professionals. Participants assessed the checklist's relevance across 4 stages: ``Planning,'' ``Design,'' ``Development,'' and ``Proposed Implementation.'' A consensus threshold of 80\% was established for each item. IQRs and Cronbach $\alpha$ were calculated to assess agreement and reliability. Results: The initial checklist had 45 questions. Following participant feedback, the checklist was refined to 34 items, and a final round saw 100\% consensus on all items (mean score >0.8, IQR 0). Based on the outcome of the Delphi study, a final checklist was outlined, with 1 more question added to make 35 questions in total. Conclusions: The Clinical AI Sociotechnical Framework checklist provides a comprehensive, structured approach to developing and implementing AI in clinical settings, addressing technical and social factors critical for adoption and success. This checklist is a practical tool that aligns AI development with real-world clinical needs, aiming to enhance patient outcomes and integrate smoothly into health care workflows. 
", doi="10.2196/65565", url="https://xmed.jmir.org/2025/1/e65565" } @Article{info:doi/10.2196/58161, author="Gazquez-Garcia, Javier and S{\'a}nchez-Bocanegra, Luis Carlos and Sevillano, Luis Jose", title="AI in the Health Sector: Systematic Review of Key Skills for Future Health Professionals", journal="JMIR Med Educ", year="2025", month="Feb", day="5", volume="11", pages="e58161", keywords="artificial intelligence", keywords="healthcare competencies", keywords="systematic review", keywords="healthcare education", keywords="AI regulation", abstract="Background: Technological advancements have significantly reshaped health care, introducing digital solutions that enhance diagnostics and patient care. Artificial intelligence (AI) stands out, offering unprecedented capabilities in data analysis, diagnostic support, and personalized medicine. However, effectively integrating AI into health care necessitates specialized competencies among professionals, an area still in its infancy in terms of comprehensive literature and formalized training programs. Objective: This systematic review aims to consolidate the essential skills and knowledge health care professionals need to integrate AI into their clinical practice effectively, according to the published literature. Methods: We conducted a systematic review, across databases PubMed, Scopus, and Web of Science, of peer-reviewed literature that directly explored the required skills for health care professionals to integrate AI into their practice, published in English or Spanish from 2018 onward. Studies that did not refer to specific skills or training in digital health were not included, discarding those that did not directly contribute to understanding the competencies necessary to integrate AI into health care practice. Bias in the examined works was evaluated following Cochrane's domain-based recommendations. Results: The initial database search yielded a total of 2457 articles. After deleting duplicates and screening titles and abstracts, 37 articles were selected for full-text review. Out of these, only 7 met all the inclusion criteria for this systematic review. The review identified a diverse range of skills and competencies, that we categorized into 14 key areas classified based on their frequency of appearance in the selected studies, including AI fundamentals, data analytics and management, and ethical considerations. Conclusions: Despite the broadening of search criteria to capture the evolving nature of AI in health care, the review underscores a significant gap in focused studies on the required competencies. Moreover, the review highlights the critical role of regulatory bodies such as the US Food and Drug Administration in facilitating the adoption of AI technologies by establishing trust and standardizing algorithms. Key areas were identified for developing competencies among health care professionals for the implementation of AI, including: AI fundamentals knowledge (more focused on assessing the accuracy, reliability, and validity of AI algorithms than on more technical abilities such as programming or mathematics), data analysis skills (including data acquisition, cleaning, visualization, management, and governance), and ethical and legal considerations. In an AI-enhanced health care landscape, the ability to humanize patient care through effective communication is paramount. 
This balance ensures that while AI streamlines tasks and potentially increases patient interaction time, health care professionals maintain a focus on compassionate care, thereby leveraging AI to enhance, rather than detract from, the patient experience. ", doi="10.2196/58161", url="https://mededu.jmir.org/2025/1/e58161" } @Article{info:doi/10.2196/53207, author="Rosenbacke, Rikard and Melhus, {\AA}sa and McKee, Martin and Stuckler, David", title="How Explainable Artificial Intelligence Can Increase or Decrease Clinicians' Trust in AI Applications in Health Care: Systematic Review", journal="JMIR AI", year="2024", month="Oct", day="30", volume="3", pages="e53207", keywords="explainable artificial intelligence", keywords="XAI", keywords="trustworthy AI", keywords="clinician trust", keywords="affect-based measures", keywords="cognitive measures", keywords="clinical use", keywords="clinical decision-making", keywords="clinical informatics", abstract="Background: Artificial intelligence (AI) has significant potential in clinical practice. However, its ``black box'' nature can lead clinicians to question its value. The challenge is to create sufficient trust for clinicians to feel comfortable using AI, but not so much that they defer to it even when it produces results that conflict with their clinical judgment in ways that lead to incorrect decisions. Explainable AI (XAI) aims to address this by providing explanations of how AI algorithms reach their conclusions. However, it remains unclear whether such explanations foster an appropriate degree of trust to ensure the optimal use of AI in clinical practice. Objective: This study aims to systematically review and synthesize empirical evidence on the impact of XAI on clinicians' trust in AI-driven clinical decision-making. Methods: A systematic review was conducted in accordance with PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) guidelines, searching PubMed and Web of Science databases. Studies were included if they empirically measured the impact of XAI on clinicians' trust using cognition- or affect-based measures. Out of 778 articles screened, 10 met the inclusion criteria. We assessed the risk of bias using standard tools appropriate to the methodology of each paper. Results: The risk of bias in all papers was moderate or moderate to high. All included studies operationalized trust primarily through cognitive-based definitions, with 2 also incorporating affect-based measures. Out of these, 5 studies reported that XAI increased clinicians' trust compared with standard AI, particularly when the explanations were clear, concise, and relevant to clinical practice. In addition, 3 studies found no significant effect of XAI on trust, indicating that the presence of explanations does not automatically improve trust. Notably, 2 studies highlighted that XAI could either enhance or diminish trust, depending on the complexity and coherence of the provided explanations. The majority of studies suggest that XAI has the potential to enhance clinicians' trust in recommendations generated by AI. However, complex or contradictory explanations can undermine this trust. More critically, trust in AI is not inherently beneficial, as AI recommendations are not infallible. These findings underscore the nuanced role of explanation quality and suggest that trust can be modulated through the careful design of XAI systems. 
Conclusions: Excessive trust in incorrect advice generated by AI can adversely impact clinical accuracy, just as can happen when correct advice is distrusted. Future research should focus on refining both cognitive and affect-based measures of trust and on developing strategies to achieve an appropriate balance in terms of trust, preventing both blind trust and undue skepticism. Optimizing trust in AI systems is essential for their effective integration into clinical practice. ", doi="10.2196/53207", url="https://ai.jmir.org/2024/1/e53207" } @Article{info:doi/10.2196/62678, author="Ball Dunlap, A. Patricia and Michalowski, Martin", title="Advancing AI Data Ethics in Nursing: Future Directions for Nursing Practice, Research, and Education", journal="JMIR Nursing", year="2024", month="Oct", day="25", volume="7", pages="e62678", keywords="artificial intelligence", keywords="AI data ethics", keywords="data-centric AI", keywords="nurses", keywords="nursing informatics", keywords="machine learning", keywords="data literacy", keywords="health care AI", keywords="responsible AI", doi="10.2196/62678", url="https://nursing.jmir.org/2024/1/e62678" } @Article{info:doi/10.2196/58011, author="Elyoseph, Zohar and Gur, Tamar and Haber, Yuval and Simon, Tomer and Angert, Tal and Navon, Yuval and Tal, Amir and Asman, Oren", title="An Ethical Perspective on the Democratization of Mental Health With Generative AI", journal="JMIR Ment Health", year="2024", month="Oct", day="17", volume="11", pages="e58011", keywords="ethics", keywords="generative artificial intelligence", keywords="generative AI", keywords="mental health", keywords="ChatGPT", keywords="large language model", keywords="LLM", keywords="digital mental health", keywords="machine learning", keywords="AI", keywords="technology", keywords="accessibility", keywords="knowledge", keywords="GenAI", doi="10.2196/58011", url="https://mental.jmir.org/2024/1/e58011" } @Article{info:doi/10.2196/53505, author="Germani, Federico and Spitale, Giovanni and Biller-Andorno, Nikola", title="The Dual Nature of AI in Information Dissemination: Ethical Considerations", journal="JMIR AI", year="2024", month="Oct", day="15", volume="3", pages="e53505", keywords="AI", keywords="bioethics", keywords="infodemic management", keywords="disinformation", keywords="artificial intelligence", keywords="ethics", keywords="ethical", keywords="infodemic", keywords="infodemics", keywords="public health", keywords="misinformation", keywords="information dissemination", keywords="information literacy", doi="10.2196/53505", url="https://ai.jmir.org/2024/1/e53505", url="http://www.ncbi.nlm.nih.gov/pubmed/39405099" } @Article{info:doi/10.2196/58493, author="Tavory, Tamar", title="Regulating AI in Mental Health: Ethics of Care Perspective", journal="JMIR Ment Health", year="2024", month="Sep", day="19", volume="11", pages="e58493", keywords="artificial intelligence", keywords="ethics of care", keywords="regulation", keywords="legal", keywords="relationship", keywords="mental health", keywords="mental healthcare", keywords="AI", keywords="ethic", keywords="ethics", keywords="ethical", keywords="regulations", keywords="law", keywords="framework", keywords="frameworks", keywords="regulatory", keywords="relationships", keywords="chatbot", keywords="chatbots", keywords="conversational agent", keywords="conversational agents", keywords="European Artificial Intelligence Act", doi="10.2196/58493", url="https://mental.jmir.org/2024/1/e58493" } @Article{info:doi/10.2196/46871, author="Han, Yu and 
Ceross, Aaron and Bergmann, Jeroen", title="Regulatory Frameworks for AI-Enabled Medical Device Software in China: Comparative Analysis and Review of Implications for Global Manufacturer", journal="JMIR AI", year="2024", month="Jul", day="29", volume="3", pages="e46871", keywords="NMPA", keywords="medical device software", keywords="device registration", keywords="registration pathway", keywords="artificial intelligence", keywords="machine learning", keywords="medical device", keywords="device development", keywords="China", keywords="regulations", keywords="medical software", doi="10.2196/46871", url="https://ai.jmir.org/2024/1/e46871", url="http://www.ncbi.nlm.nih.gov/pubmed/39073860" } @Article{info:doi/10.2196/55957, author="Bragazzi, Luigi Nicola and Garbarino, Sergio", title="Toward Clinical Generative AI: Conceptual Framework", journal="JMIR AI", year="2024", month="Jun", day="7", volume="3", pages="e55957", keywords="clinical intelligence", keywords="artificial intelligence", keywords="iterative process", keywords="abduction", keywords="benchmarking", keywords="verification paradigms", doi="10.2196/55957", url="https://ai.jmir.org/2024/1/e55957", url="http://www.ncbi.nlm.nih.gov/pubmed/38875592" } @Article{info:doi/10.2196/54501, author="Jordan, Alexis and Park, Albert", title="Understanding the Long Haulers of COVID-19: Mixed Methods Analysis of YouTube Content", journal="JMIR AI", year="2024", month="Jun", day="3", volume="3", pages="e54501", keywords="long haulers", keywords="post--COVID-19 condition", keywords="COVID-19", keywords="YouTube", keywords="topic modeling", keywords="natural language processing", abstract="Background: The COVID-19 pandemic had a devastating global impact. In the United States, there were >98 million COVID-19 cases and >1 million resulting deaths. One consequence of COVID-19 infection has been post--COVID-19 condition (PCC). People with this syndrome, colloquially called long haulers, experience symptoms that impact their quality of life. The root cause of PCC and effective treatments remain unknown. Many long haulers have turned to social media for support and guidance. Objective: In this study, we sought to gain a better understanding of the long hauler experience by investigating what has been discussed and how information about long haulers is perceived on social media. We specifically investigated the following: (1) the range of symptoms that are discussed, (2) the ways in which information about long haulers is perceived, (3) informational and emotional support that is available to long haulers, and (4) discourse between viewers and creators. We selected YouTube as our data source due to its popularity and wide audience reach. Methods: We systematically gathered data from 3 different types of content creators: medical sources, news sources, and long haulers. To computationally understand the video content and viewers' reactions, we used Biterm, a topic modeling algorithm created specifically for short texts, to analyze snippets of video transcripts and all top-level comments from the comment section. To triangulate our findings about viewers' reactions, we used the Valence Aware Dictionary and Sentiment Reasoner to conduct sentiment analysis on comments from each type of content creator. We grouped the comments into positive and negative categories and generated topics for these groups using Biterm. We then manually grouped resulting topics into broader themes for the purpose of analysis. 
Results: We organized the resulting topics into 28 themes across all sources. Examples of medical source transcript themes were Explanations in layman's terms and Biological explanations. Examples of news source transcript themes were Negative experiences and Handling the long haul. The 2 long hauler transcript themes were Taking treatments into own hands and Changes to daily life. News sources received a greater share of negative comments. A few themes of these negative comments included Misinformation and disinformation and Issues with the health care system. Similarly, negative long hauler comments were organized into several themes, including Disillusionment with the health care system and Requiring more visibility. In contrast, positive medical source comments captured themes such as Appreciation of helpful content and Exchange of helpful information. In addition to these themes, one positive theme found in long hauler comments was Community building. Conclusions: The results of this study could help public health agencies, policy makers, organizations, and health researchers understand symptomatology and experiences related to PCC. They could also help these agencies develop their communication strategy concerning PCC. ", doi="10.2196/54501", url="https://ai.jmir.org/2024/1/e54501", url="http://www.ncbi.nlm.nih.gov/pubmed/38875666" } @Article{info:doi/10.2196/40781, author="Waheed, Atif Muhammad and Liu, Lu", title="Perceptions of Family Physicians About Applying AI in Primary Health Care: Case Study From a Premier Health Care Organization", journal="JMIR AI", year="2024", month="Apr", day="17", volume="3", pages="e40781", keywords="AI", keywords="artificial intelligence", keywords="perception", keywords="attitude", keywords="opinion", keywords="surveys and questionnaires", keywords="family physician", keywords="primary care", keywords="health care service provider", keywords="health care professional", keywords="ethical", keywords="AI decision-making", keywords="AI challenges", abstract="Background: The COVID-19 pandemic has led to the rapid proliferation of artificial intelligence (AI), a development that was not previously anticipated. The use of AI in health care settings is increasing, as it proves to be a promising tool for transforming health care systems, improving operational and business processes, and efficiently simplifying health care tasks for family physicians and health care administrators. Therefore, it is necessary to assess the perspective of family physicians on AI and its impact on their job roles. Objective: This study aims to determine the impact of AI on the management and practices of Qatar's Primary Health Care Corporation (PHCC) in improving health care tasks and service delivery. Furthermore, it seeks to evaluate the impact of AI on family physicians' job roles, including associated risks and ethical ramifications from their perspective. Methods: We conducted a cross-sectional survey and sent a web-based questionnaire survey link to 724 practicing family physicians at the PHCC. In total, we received 102 eligible responses. Results: Of the 102 respondents, 72 (70.6\%) were men and 94 (92.2\%) were aged between 35 and 54 years. In addition, 58 (56.9\%) of the 102 respondents were consultants. Overall, 80 (78.4\%) of the 102 respondents were aware of AI, with no difference between genders (P=.06) or age groups (P=.12). 
AI is perceived to play a positive role in improving health care practices at PHCC (P<.001), managing health care tasks (P<.001), and positively impacting health care service delivery (P<.001). Family physicians also perceived that their clinical, administrative, and opportunistic health care management roles were positively influenced by AI (P<.001). Furthermore, perceptions of family physicians indicate that AI improves operational and human resource management (P<.001), does not undermine patient-physician relationships (P<.001), and is not considered superior to human physicians in the clinical judgment process (P<.001). However, its inclusion is believed to decrease patient satisfaction (P<.001). AI decision-making and accountability were recognized as ethical risks, along with data protection and confidentiality. The optimism regarding using AI for future medical decisions was low among family physicians. Conclusions: This study indicated a positive perception among family physicians regarding AI integration into primary care settings. AI demonstrates significant potential for enhancing health care task management and overall service delivery at the PHCC. It augments family physicians' roles without replacing them and proves beneficial for operational efficiency, human resource management, and public health during pandemics. While the implementation of AI is anticipated to bring benefits, the careful consideration of ethical, privacy, confidentiality, and patient-centric concerns is essential. These insights provide valuable guidance for the strategic integration of AI into health care systems, with a focus on maintaining high-quality patient care and addressing the multifaceted challenges that arise during this transformative process. ", doi="10.2196/40781", url="https://ai.jmir.org/2024/1/e40781", url="http://www.ncbi.nlm.nih.gov/pubmed/38875531" } @Article{info:doi/10.2196/47652, author="Sp{\"a}th, Julian and Sewald, Zeno and Probul, Niklas and Berland, Magali and Almeida, Mathieu and Pons, Nicolas and Le Chatelier, Emmanuelle and Gin{\`e}s, Pere and Sol{\'e}, Cristina and Juanola, Adri{\`a} and Pauling, Josch and Baumbach, Jan", title="Privacy-Preserving Federated Survival Support Vector Machines for Cross-Institutional Time-To-Event Analysis: Algorithm Development and Validation", journal="JMIR AI", year="2024", month="Mar", day="29", volume="3", pages="e47652", keywords="federated learning", keywords="survival analysis", keywords="support vector machine", keywords="machine learning", keywords="federated", keywords="algorithm", keywords="survival", keywords="FeatureCloud", keywords="predict", keywords="predictive", keywords="prediction", keywords="predictions", keywords="Implementation science", keywords="Implementation", keywords="centralized model", keywords="privacy regulation", abstract="Background: Central collection of distributed medical patient data is problematic due to strict privacy regulations. Especially in clinical environments, such as clinical time-to-event studies, large sample sizes are critical but usually not available at a single institution. It has been shown recently that federated learning, combined with privacy-enhancing technologies, is an excellent and privacy-preserving alternative to data sharing. Objective: This study aims to develop and validate a privacy-preserving, federated survival support vector machine (SVM) and make it accessible for researchers to perform cross-institutional time-to-event analyses. 
Methods: We extended the survival SVM algorithm to be applicable in federated environments. We further implemented it as a FeatureCloud app, enabling it to run in the federated infrastructure provided by the FeatureCloud platform. Finally, we evaluated our algorithm on 3 benchmark data sets, a large sample size synthetic data set, and a real-world microbiome data set and compared the results to the corresponding central method. Results: Our federated survival SVM produces highly similar results to the centralized model on all data sets. The maximal difference between the model weights of the central model and the federated model was only 0.001, and the mean difference over all data sets was 0.0002. We further show that by including more data in the analysis through federated learning, predictions are more accurate even in the presence of site-dependent batch effects. Conclusions: The federated survival SVM extends the palette of federated time-to-event analysis methods by a robust machine learning approach. To our knowledge, the implemented FeatureCloud app is the first publicly available implementation of a federated survival SVM, is freely accessible for all kinds of researchers, and can be directly used within the FeatureCloud platform. ", doi="10.2196/47652", url="https://ai.jmir.org/2024/1/e47652", url="http://www.ncbi.nlm.nih.gov/pubmed/38875678" } @Article{info:doi/10.2196/52054, author="Wiepert, Daniela and Malin, A. Bradley and Duffy, R. Joseph and Utianski, L. Rene and Stricker, L. John and Jones, T. David and Botha, Hugo", title="Reidentification of Participants in Shared Clinical Data Sets: Experimental Study", journal="JMIR AI", year="2024", month="Mar", day="15", volume="3", pages="e52054", keywords="reidentification", keywords="privacy", keywords="adversarial attack", keywords="health care", keywords="speech disorders", keywords="voiceprint", abstract="Background: Large curated data sets are required to leverage speech-based tools in health care. These are costly to produce, resulting in increased interest in data sharing. As speech can potentially identify speakers (ie, voiceprints), sharing recordings raises privacy concerns. This is especially relevant when working with patient data protected under the Health Insurance Portability and Accountability Act. Objective: We aimed to determine the reidentification risk for speech recordings, without reference to demographics or metadata, in clinical data sets considering both the size of the search space (ie, the number of comparisons that must be considered when reidentifying) and the nature of the speech recording (ie, the type of speech task). Methods: Using a state-of-the-art speaker identification model, we modeled an adversarial attack scenario in which an adversary uses a large data set of identified speech (hereafter, the known set) to reidentify as many unknown speakers in a shared data set (hereafter, the unknown set) as possible. We first considered the effect of search space size by attempting reidentification with various sizes of known and unknown sets using VoxCeleb, a data set with recordings of natural, connected speech from >7000 healthy speakers. We then repeated these tests with different types of recordings in each set to examine whether the nature of a speech recording influences reidentification risk. For these tests, we used our clinical data set composed of recordings of elicited speech tasks from 941 speakers. 
Results: We found that the risk was inversely related to the number of comparisons an adversary must consider (ie, the search space), with a positive linear correlation between the number of false acceptances (FAs) and the number of comparisons (r=0.69; P<.001). The true acceptances (TAs) stayed relatively stable, and the ratio between FAs and TAs rose from 0.02 at 1 {\texttimes} 10\textsuperscript{5} comparisons to 1.41 at 6 {\texttimes} 10\textsuperscript{6} comparisons, with a near 1:1 ratio at the midpoint of 3 {\texttimes} 10\textsuperscript{6} comparisons. In effect, risk was high for a small search space but dropped as the search space grew. We also found that the nature of a speech recording influenced reidentification risk, with nonconnected speech (eg, vowel prolongation: FA/TA=98.5; alternating motion rate: FA/TA=8) being harder to identify than connected speech (eg, sentence repetition: FA/TA=0.54) in cross-task conditions. The inverse was mostly true in within-task conditions, with the FA/TA ratio for vowel prolongation and alternating motion rate dropping to 0.39 and 1.17, respectively. Conclusions: Our findings suggest that speaker identification models can be used to reidentify participants in specific circumstances, but in practice, the reidentification risk appears small. The variation in risk due to search space size and type of speech task provides actionable recommendations to further increase participant privacy and considerations for policy regarding public release of speech recordings. ", doi="10.2196/52054", url="https://ai.jmir.org/2024/1/e52054", url="http://www.ncbi.nlm.nih.gov/pubmed/38875581" } @Article{info:doi/10.2196/47240, author="Lu, Jiahui and Zhang, Huibin and Xiao, Yi and Wang, Yingyu", title="An Environmental Uncertainty Perception Framework for Misinformation Detection and Spread Prediction in the COVID-19 Pandemic: Artificial Intelligence Approach", journal="JMIR AI", year="2024", month="Jan", day="29", volume="3", pages="e47240", keywords="misinformation detection", keywords="misinformation spread prediction", keywords="uncertainty", keywords="COVID-19", keywords="information environment", abstract="Background: Amidst the COVID-19 pandemic, misinformation on social media has posed significant threats to public health. Detecting and predicting the spread of misinformation are crucial for mitigating its adverse effects. However, prevailing frameworks for these tasks have predominantly focused on post-level signals of misinformation, neglecting features of the broader information environment where misinformation originates and proliferates. Objective: This study aims to create a novel framework that integrates the uncertainty of the information environment into misinformation features, with the goal of enhancing the model's accuracy in tasks such as misinformation detection and predicting the scale of dissemination. The objective is to provide better support for online governance efforts during health crises. Methods: In this study, we embraced uncertainty features within the information environment and introduced a novel Environmental Uncertainty Perception (EUP) framework for the detection of misinformation and the prediction of its spread on social media. The framework encompasses uncertainty at 4 scales of the information environment: physical environment, macro-media environment, micro-communicative environment, and message framing. We assessed the effectiveness of the EUP using real-world COVID-19 misinformation data sets. 
Results: The experimental results demonstrated that the EUP alone achieved notably good performance, with detection accuracy at 0.753 and prediction accuracy at 0.71. These results were comparable to state-of-the-art baseline models such as bidirectional long short-term memory (BiLSTM; detection accuracy 0.733 and prediction accuracy 0.707) and bidirectional encoder representations from transformers (BERT; detection accuracy 0.755 and prediction accuracy 0.728). Additionally, when the baseline models collaborated with the EUP, they exhibited improved accuracy by an average of 1.98\% for misinformation detection and 2.4\% for spread-prediction tasks. On unbalanced data sets, the EUP yielded relative improvements of 21.5\% and 5.7\% in macro-F1-score and area under the curve, respectively. Conclusions: This study makes a significant contribution to the literature by recognizing uncertainty features within information environments as a crucial factor for improving misinformation detection and spread-prediction algorithms during the pandemic. The research elaborates on the complexities of uncertain information environments for misinformation across 4 distinct scales, including the physical environment, macro-media environment, micro-communicative environment, and message framing. The findings underscore the effectiveness of incorporating uncertainty into misinformation detection and spread prediction, providing an interdisciplinary and easily implementable framework for the field. ", doi="10.2196/47240", url="https://ai.jmir.org/2024/1/e47240", url="http://www.ncbi.nlm.nih.gov/pubmed/38875583" } @Article{info:doi/10.2196/49082, author="Hansen, Steffan and Brandt, Joakim Carl and S{\o}ndergaard, Jens", title="Beyond the Hype---The Actual Role and Risks of AI in Today's Medical Practice: Comparative-Approach Study", journal="JMIR AI", year="2024", month="Jan", day="22", volume="3", pages="e49082", keywords="AI", keywords="artificial intelligence", keywords="ChatGPT-4", keywords="Microsoft Bing", keywords="general practice", keywords="ChatGPT", keywords="chatbot", keywords="chatbots", keywords="writing", keywords="academic", keywords="academia", keywords="Bing", abstract="Background: The evolution of artificial intelligence (AI) has significantly impacted various sectors, with health care witnessing some of its most groundbreaking contributions. Contemporary models, such as ChatGPT-4 and Microsoft Bing, have showcased capabilities beyond just generating text, aiding in complex tasks like literature searches and refining web-based queries. Objective: This study explores a compelling query: can AI author an academic paper independently? Our assessment focuses on four core dimensions: relevance (to ensure that AI's response directly addresses the prompt), accuracy (to ascertain that AI's information is both factually correct and current), clarity (to examine AI's ability to present coherent and logical ideas), and tone and style (to evaluate whether AI can align with the formality expected in academic writings). Additionally, we will consider the ethical implications and practicality of integrating AI into academic writing. Methods: To assess the capabilities of ChatGPT-4 and Microsoft Bing in the context of academic paper assistance in general practice, we used a systematic approach. ChatGPT-4, an advanced AI language model by OpenAI, excels in generating human-like text and adapting responses based on user interactions, though it has a knowledge cut-off in September 2021. 
Microsoft Bing's AI chatbot facilitates user navigation on the Bing search engine, offering tailored search results. Results: In terms of relevance, ChatGPT-4 delved deeply into AI's health care role, citing academic sources and discussing diverse applications and concerns, while Microsoft Bing provided a concise, less detailed overview. In terms of accuracy, ChatGPT-4 correctly cited 72\% (23/32) of its peer-reviewed articles but included some nonexistent references. Microsoft Bing's accuracy stood at 46\% (6/13), supplemented by relevant non--peer-reviewed articles. In terms of clarity, both models conveyed clear, coherent text. ChatGPT-4 was particularly adept at detailing technical concepts, while Microsoft Bing was more general. In terms of tone, both models maintained an academic tone, but ChatGPT-4 exhibited superior depth and breadth in content delivery. Conclusions: Comparing ChatGPT-4 and Microsoft Bing for academic assistance revealed strengths and limitations. ChatGPT-4 excels in depth and relevance but falters in citation accuracy. Microsoft Bing is concise but lacks robust detail. Though both models have potential, neither can independently handle comprehensive academic tasks. As AI evolves, combining ChatGPT-4's depth with Microsoft Bing's up-to-date referencing could optimize academic support. Researchers should critically assess AI outputs to maintain academic credibility. ", doi="10.2196/49082", url="https://ai.jmir.org/2024/1/e49082" } @Article{info:doi/10.2196/51204, author="Weidener, Lukas and Fischer, Michael", title="Role of Ethics in Developing AI-Based Applications in Medicine: Insights From Expert Interviews and Discussion of Implications", journal="JMIR AI", year="2024", month="Jan", day="12", volume="3", pages="e51204", keywords="artificial intelligence", keywords="AI", keywords="medicine", keywords="ethics", keywords="expert interviews", keywords="AI development", keywords="AI ethics", abstract="Background: The integration of artificial intelligence (AI)--based applications in the medical field has increased significantly, offering potential improvements in patient care and diagnostics. However, alongside these advancements, there is growing concern about ethical considerations, such as bias, informed consent, and trust in the development of these technologies. Objective: This study aims to assess the role of ethics in the development of AI-based applications in medicine. Furthermore, this study focuses on the potential consequences of neglecting ethical considerations in AI development, particularly their impact on patients and physicians. Methods: Qualitative content analysis was used to analyze the responses from expert interviews. Experts were selected based on their involvement in the research or practical development of AI-based applications in medicine for at least 5 years, leading to the inclusion of 7 experts in the study. Results: The analysis revealed 3 main categories and 7 subcategories reflecting a wide range of views on the role of ethics in AI development. This variance underscores the subjectivity and complexity of integrating ethics into the development of AI in medicine. Although some experts view ethics as fundamental, others prioritize performance and efficiency, with some perceiving ethics as potential obstacles to technological progress. This dichotomy of perspectives clearly emphasizes the subjectivity and complexity surrounding the role of ethics in AI development, reflecting the inherent multifaceted nature of this issue. 
Conclusions: Despite the methodological limitations impacting the generalizability of the results, this study underscores the critical importance of consistent and integrated ethical considerations in AI development for medical applications. It advocates further research into effective strategies for ethical AI development, emphasizing the need for transparent and responsible practices, consideration of diverse data sources, physician training, and the establishment of comprehensive ethical and legal frameworks. ", doi="10.2196/51204", url="https://ai.jmir.org/2024/1/e51204", url="http://www.ncbi.nlm.nih.gov/pubmed/38875585" } @Article{info:doi/10.2196/47283, author="Benjamens, Stan and Dhunnoo, Pranavsingh and G{\"o}r{\"o}g, M{\'a}rton and Mesko, Bertalan", title="Forecasting Artificial Intelligence Trends in Health Care: Systematic International Patent Analysis", journal="JMIR AI", year="2023", month="May", day="26", volume="2", pages="e47283", keywords="artificial intelligence", keywords="patent", keywords="healthcare", keywords="health care", keywords="medical", keywords="forecasting", keywords="future", keywords="AI", keywords="machine learning", keywords="medical device", keywords="open-access", keywords="AI technology", abstract="Background: Artificial intelligence (AI)-- and machine learning (ML)--based medical devices and algorithms are rapidly changing the medical field. To provide an insight into the trends in AI and ML in health care, we conducted an international patent analysis. Objective: It is pivotal to obtain a clear overview of upcoming AI and ML trends in health care to put regulators in a better position to foresee which technologies, not yet available on the market, they will have to create regulations for. Therefore, in this study, we provide insights and forecasts into the trends in AI and ML in health care by conducting an international patent analysis. Methods: A systematic patent analysis, focusing on AI- and ML-based patents in health care, was performed using the Espacenet database (from January 2012 until July 2022). This database includes patents from the China National Intellectual Property Administration, European Patent Office, Japan Patent Office, Korean Intellectual Property Office, and the United States Patent and Trademark Office. Results: We identified 10,967 patents: 7332 (66.9\%) from the China National Intellectual Property Administration, 191 (1.7\%) from the European Patent Office, 163 (1.5\%) from the Japan Patent Office, 513 (4.7\%) from the Korean Intellectual Property Office, and 2768 (25.2\%) from the United States Patent and Trademark Office. The number of published patents showed a yearly doubling from 2015 until 2021. Five international companies that had the greatest impact on this increase were Ping An Medical and Healthcare Management Co Ltd with 568 (5.2\%) patents, Siemens Healthineers with 273 (2.5\%) patents, IBM Corp with 226 (2.1\%) patents, Philips Healthcare with 150 (1.4\%) patents, and Shanghai United Imaging Healthcare Co Ltd with 144 (1.3\%) patents. Conclusions: This international patent analysis showed a linear increase in patents published by the 5 largest patent offices. An open access database with interactive search options was launched for AI- and ML-based patents in health care. 
", doi="10.2196/47283", url="https://ai.jmir.org/2023/1/e47283", url="http://www.ncbi.nlm.nih.gov/pubmed/10449890" } @Article{info:doi/10.2196/41205, author="Owen, David and Antypas, Dimosthenis and Hassoulas, Athanasios and Pardi{\~n}as, F. Antonio and Espinosa-Anke, Luis and Collados, Camacho Jose", title="Enabling Early Health Care Intervention by Detecting Depression in Users of Web-Based Forums using Language Models: Longitudinal Analysis and Evaluation", journal="JMIR AI", year="2023", month="Mar", day="24", volume="2", pages="e41205", keywords="mental health", keywords="depression", keywords="internet", keywords="natural language processing", keywords="transformers", keywords="language models", keywords="sentiment", abstract="Background: Major depressive disorder is a common mental disorder affecting 5\% of adults worldwide. Early contact with health care services is critical for achieving accurate diagnosis and improving patient outcomes. Key symptoms of major depressive disorder (depression hereafter) such as cognitive distortions are observed in verbal communication, which can also manifest in the structure of written language. Thus, the automatic analysis of text outputs may provide opportunities for early intervention in settings where written communication is rich and regular, such as social media and web-based forums. Objective: The objective of this study was 2-fold. We sought to gauge the effectiveness of different machine learning approaches to identify users of the mass web-based forum Reddit, who eventually disclose a diagnosis of depression. We then aimed to determine whether the time between a forum post and a depression diagnosis date was a relevant factor in performing this detection. Methods: A total of 2 Reddit data sets containing posts belonging to users with and without a history of depression diagnosis were obtained. The intersection of these data sets provided users with an estimated date of depression diagnosis. This derived data set was used as an input for several machine learning classifiers, including transformer-based language models (LMs). Results: Bidirectional Encoder Representations from Transformers (BERT) and MentalBERT transformer-based LMs proved the most effective in distinguishing forum users with a known depression diagnosis from those without. They each obtained a mean F1-score of 0.64 across the experimental setups used for binary classification. The results also suggested that the final 12 to 16 weeks (about 3-4 months) of posts before a depressed user's estimated diagnosis date are the most indicative of their illness, with data before that period not helping the models detect more accurately. Furthermore, in the 4- to 8-week period before the user's estimated diagnosis date, their posts exhibited more negative sentiment than any other 4-week period in their post history. Conclusions: Transformer-based LMs may be used on data from web-based social media forums to identify users at risk for psychiatric conditions such as depression. Language features picked up by these classifiers might predate depression onset by weeks to months, enabling proactive mental health care interventions to support those at risk for this condition. 
", doi="10.2196/41205", url="https://ai.jmir.org/2023/1/e41205", url="http://www.ncbi.nlm.nih.gov/pubmed/37525646" } @Article{info:doi/10.2196/42936, author="Berdahl, Thomas Carl and Baker, Lawrence and Mann, Sean and Osoba, Osonde and Girosi, Federico", title="Strategies to Improve the Impact of Artificial Intelligence on Health Equity: Scoping Review", journal="JMIR AI", year="2023", month="Feb", day="7", volume="2", pages="e42936", keywords="artificial intelligence", keywords="machine learning", keywords="health equity", keywords="health care disparities", keywords="algorithmic bias", keywords="social determinants of health", keywords="decision making", keywords="algorithms", keywords="gray literature", keywords="equity", keywords="health data", abstract="Background: Emerging artificial intelligence (AI) applications have the potential to improve health, but they may also perpetuate or exacerbate inequities. Objective: This review aims to provide a comprehensive overview of the health equity issues related to the use of AI applications and identify strategies proposed to address them. Methods: We searched PubMed, Web of Science, the IEEE (Institute of Electrical and Electronics Engineers) Xplore Digital Library, ProQuest U.S. Newsstream, Academic Search Complete, the Food and Drug Administration (FDA) website, and ClinicalTrials.gov to identify academic and gray literature related to AI and health equity that were published between 2014 and 2021 and additional literature related to AI and health equity during the COVID-19 pandemic from 2020 and 2021. Literature was eligible for inclusion in our review if it identified at least one equity issue and a corresponding strategy to address it. To organize and synthesize equity issues, we adopted a 4-step AI application framework: Background Context, Data Characteristics, Model Design, and Deployment. We then created a many-to-many mapping of the links between issues and strategies. Results: In 660 documents, we identified 18 equity issues and 15 strategies to address them. Equity issues related to Data Characteristics and Model Design were the most common. The most common strategies recommended to improve equity were improving the quantity and quality of data, evaluating the disparities introduced by an application, increasing model reporting and transparency, involving the broader community in AI application development, and improving governance. Conclusions: Stakeholders should review our many-to-many mapping of equity issues and strategies when planning, developing, and implementing AI applications in health care so that they can make appropriate plans to ensure equity for populations affected by their products. AI application developers should consider adopting equity-focused checklists, and regulators such as the FDA should consider requiring them. Given that our review was limited to documents published online, developers may have unpublished knowledge of additional issues and strategies that we were unable to identify. ", doi="10.2196/42936", url="https://ai.jmir.org/2023/1/e42936" } @Article{info:doi/10.2196/42940, author="Mashar, Meghavi and Chawla, Shreya and Chen, Fangyue and Lubwama, Baker and Patel, Kyle and Kelshiker, A. Mihir and Bachtiger, Patrik and Peters, S. 
Nicholas", title="Artificial Intelligence Algorithms in Health Care: Is the Current Food and Drug Administration Regulation Sufficient?", journal="JMIR AI", year="2023", month="Jan", day="16", volume="2", pages="e42940", keywords="artificial intelligence", keywords="machine learning", keywords="regulation", doi="10.2196/42940", url="https://ai.jmir.org/2023/1/e42940" }