<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR AI</journal-id>
      <journal-title>JMIR AI</journal-title>
      <issn pub-type="epub">2817-1705</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v2i1e52888</article-id>
      <article-id pub-id-type="pmid">38875540</article-id>
      <article-id pub-id-type="doi">10.2196/52888</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Developing Ethics and Equity Principles, Terms, and Engagement Tools to Advance Health Equity and Researcher Diversity in AI and Machine Learning: Modified Delphi Approach</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Wiertz</surname>
            <given-names>Svenja</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bito</surname>
            <given-names>Seiji</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Hendricks-Sturrup</surname>
            <given-names>Rachele</given-names>
          </name>
          <degrees>MA, MS, DHSc</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <address>
            <institution>National Alliance Against Disparities in Patient Health</institution>
            <addr-line>2700 Neabsco Common Place</addr-line>
            <addr-line>Suite 101</addr-line>
            <addr-line>Woodbridge, VA, 22191</addr-line>
            <country>United States</country>
            <phone>1 (571) 316 5116</phone>
            <email>hendricks-sturrup@nadph.org</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3390-2583</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Simmons</surname>
            <given-names>Malaika</given-names>
          </name>
          <degrees>MSHE</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0006-8863-9571</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Anders</surname>
            <given-names>Shilo</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2327-7323</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Aneni</surname>
            <given-names>Kammarauche</given-names>
          </name>
          <degrees>MBBS, MHS</degrees>
          <xref rid="aff03" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5792-3605</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Wright Clayton</surname>
            <given-names>Ellen</given-names>
          </name>
          <degrees>MD, JD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0308-4110</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Coco</surname>
            <given-names>Joseph</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6195-5757</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Collins</surname>
            <given-names>Benjamin</given-names>
          </name>
          <degrees>MD, MS, MA</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6884-3819</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Heitman</surname>
            <given-names>Elizabeth</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff04" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-4855-8551</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Hussain</surname>
            <given-names>Sajid</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff05" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8803-3792</ext-link>
        </contrib>
        <contrib id="contrib10" contrib-type="author">
          <name name-style="western">
            <surname>Joshi</surname>
            <given-names>Karuna</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff06" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6354-1686</ext-link>
        </contrib>
        <contrib id="contrib11" contrib-type="author">
          <name name-style="western">
            <surname>Lemieux</surname>
            <given-names>Josh</given-names>
          </name>
          <xref rid="aff07" ref-type="aff">7</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0003-5813-4255</ext-link>
        </contrib>
        <contrib id="contrib12" contrib-type="author">
          <name name-style="western">
            <surname>Lovett Novak</surname>
            <given-names>Laurie</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0415-4301</ext-link>
        </contrib>
        <contrib id="contrib13" contrib-type="author">
          <name name-style="western">
            <surname>Rubin</surname>
            <given-names>Daniel J</given-names>
          </name>
          <degrees>MD, MSc</degrees>
          <xref rid="aff08" ref-type="aff">8</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6871-6246</ext-link>
        </contrib>
        <contrib id="contrib14" contrib-type="author">
          <name name-style="western">
            <surname>Shanker</surname>
            <given-names>Anil</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff09" ref-type="aff">9</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6372-3669</ext-link>
        </contrib>
        <contrib id="contrib15" contrib-type="author">
          <name name-style="western">
            <surname>Washington</surname>
            <given-names>Talitha</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff10" ref-type="aff">10</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3796-2273</ext-link>
        </contrib>
        <contrib id="contrib16" contrib-type="author">
          <name name-style="western">
            <surname>Waters</surname>
            <given-names>Gabriella</given-names>
          </name>
          <xref rid="aff11" ref-type="aff">11</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0001-1821-6091</ext-link>
        </contrib>
        <contrib id="contrib17" contrib-type="author">
          <name name-style="western">
            <surname>Webb Harris</surname>
            <given-names>Joyce</given-names>
          </name>
          <degrees>MA</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0002-3192-3839</ext-link>
        </contrib>
        <contrib id="contrib18" contrib-type="author">
          <name name-style="western">
            <surname>Yin</surname>
            <given-names>Rui</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff12" ref-type="aff">12</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1403-0396</ext-link>
        </contrib>
        <contrib id="contrib19" contrib-type="author">
          <name name-style="western">
            <surname>Wagner</surname>
            <given-names>Teresa</given-names>
          </name>
          <degrees>MS, DrPH</degrees>
          <xref rid="aff13" ref-type="aff">13</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0916-432X</ext-link>
        </contrib>
        <contrib id="contrib20" contrib-type="author">
          <name name-style="western">
            <surname>Yin</surname>
            <given-names>Zhijun</given-names>
          </name>
          <degrees>MS, PhD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3075-1337</ext-link>
        </contrib>
        <contrib id="contrib21" contrib-type="author">
          <name name-style="western">
            <surname>Malin</surname>
            <given-names>Bradley</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3040-5175</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff01">
        <label>1</label>
        <institution>National Alliance Against Disparities in Patient Health</institution>
        <addr-line>Woodbridge, VA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff02">
        <label>2</label>
        <institution>Vanderbilt University Medical Center</institution>
        <addr-line>Nashville, TN</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff03">
        <label>3</label>
        <institution>Yale University</institution>
        <addr-line>New Haven, CT</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff04">
        <label>4</label>
        <institution>University of Texas Southwestern Medical Center</institution>
        <addr-line>Dallas, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff05">
        <label>5</label>
        <institution>Fisk University</institution>
        <addr-line>Nashville, TN</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff06">
        <label>6</label>
        <institution>University of Maryland, Baltimore County</institution>
        <addr-line>Baltimore, MD</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff07">
        <label>7</label>
        <institution>OCHIN</institution>
        <addr-line>Portland, OR</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff08">
        <label>8</label>
        <institution>Temple University</institution>
        <addr-line>Philadelphia, PA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff09">
        <label>9</label>
        <institution>Meharry Medical College</institution>
        <addr-line>Nashville, TN</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff10">
        <label>10</label>
        <institution>AUC Data Science Initiative</institution>
        <institution>Clark Atlanta University</institution>
        <addr-line>Atlanta, GA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff11">
        <label>11</label>
        <institution>Morgan State University</institution>
        <institution>Center for Equitable AI &#38; Machine Learning Systems</institution>
        <addr-line>Baltimore, MD</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff12">
        <label>12</label>
        <institution>University of Florida</institution>
        <addr-line>Gainesville, FL</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff13">
        <label>13</label>
        <institution>University of North Texas Health Science Center</institution>
        <institution>SaferCare Texas</institution>
        <addr-line>Fort Worth, TX</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Rachele Hendricks-Sturrup <email>hendricks-sturrup@nadph.org</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>6</day>
        <month>12</month>
        <year>2023</year>
      </pub-date>
      <volume>2</volume>
      <elocation-id>e52888</elocation-id>
      <history>
        <date date-type="received">
          <day>18</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>16</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>1</day>
          <month>11</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>5</day>
          <month>11</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Rachele Hendricks-Sturrup, Malaika Simmons, Shilo Anders, Kammarauche Aneni, Ellen Wright Clayton, Joseph Coco, Benjamin Collins, Elizabeth Heitman, Sajid Hussain, Karuna Joshi, Josh Lemieux, Laurie Lovett Novak, Daniel J Rubin, Anil Shanker, Talitha Washington, Gabriella Waters, Joyce Webb Harris, Rui Yin, Teresa Wagner, Zhijun Yin, Bradley Malin. Originally published in JMIR AI (https://ai.jmir.org), 06.12.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on https://www.ai.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://ai.jmir.org/2023/1/e52888" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) and machine learning (ML) technology design and development continues to be rapid, despite major limitations in its current form as a practice and discipline to address all sociohumanitarian issues and complexities. From these limitations emerges an imperative to strengthen AI and ML literacy in underserved communities and build a more diverse AI and ML design and development workforce engaged in health research.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>AI and ML have the potential to account for and assess a variety of factors that contribute to health and disease and to improve prevention, diagnosis, and therapy. Here, we describe recent activities within the Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity (AIM-AHEAD) Ethics and Equity Workgroup (EEWG) that led to the development of deliverables that will help put ethics and fairness at the forefront of AI and ML applications to build equity in biomedical research, education, and health care.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>The AIM-AHEAD EEWG was created in 2021 with 3 cochairs and 51 members in year 1 and 2 cochairs and ~40 members in year 2. Members in both years included AIM-AHEAD principal investigators, coinvestigators, leadership fellows, and research fellows. The EEWG used a modified Delphi approach using polling, ranking, and other exercises to facilitate discussions around tangible steps, key terms, and definitions needed to ensure that ethics and fairness are at the forefront of AI and ML applications to build equity in biomedical research, education, and health care.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The EEWG developed a set of ethics and equity principles, a glossary, and an interview guide. The ethics and equity principles comprise 5 core principles, each with subparts, which articulate best practices for working with stakeholders from historically and presently underrepresented communities. The glossary contains 12 terms and definitions, with particular emphasis on optimal development, refinement, and implementation of AI and ML in health equity research. To accompany the glossary, the EEWG developed a concept relationship diagram that describes the logical flow of and relationship between the definitional concepts. Lastly, the interview guide provides questions that can be used or adapted to garner stakeholder and community perspectives on the principles and glossary.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Ongoing engagement is needed around our principles and glossary to identify and predict potential limitations in their uses in AI and ML research settings, especially for institutions with limited resources. This requires time, careful consideration, and honest discussions around what classifies an engagement incentive as meaningful to support and sustain their full engagement. By slowing down to meet historically and presently underresourced institutions and communities where they are and where they are capable of engaging and competing, there is higher potential to achieve needed diversity, ethics, and equity in AI and ML implementation in health research.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>AI</kwd>
        <kwd>Delphi</kwd>
        <kwd>disparities</kwd>
        <kwd>disparity</kwd>
        <kwd>engagement</kwd>
        <kwd>equitable</kwd>
        <kwd>equities</kwd>
        <kwd>equity</kwd>
        <kwd>ethic</kwd>
        <kwd>ethical</kwd>
        <kwd>ethics</kwd>
        <kwd>fair</kwd>
        <kwd>fairness</kwd>
        <kwd>health disparities</kwd>
        <kwd>health equity</kwd>
        <kwd>humanitarian</kwd>
        <kwd>machine learning</kwd>
        <kwd>ML</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Recent events and academic literature have underscored a role for the field of artificial intelligence (AI) and machine learning (ML) technology to take all stakeholders’ impressions and concerns into account to inform approaches for achieving health equity [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. It has also become imperative to strengthen AI and ML literacy in underserved communities and build a more diverse workforce in AI and ML design and development. However, whether as a practice or as an academic discipline, AI and ML are not yet engineered to address all sociohumanitarian issues and complexities. This is especially true for socially and economically marginalized communities whose members are frequently unheard or have limited engagement in research, discovery, and innovation pipelines for cultivating shared prosperity.</p>
      <p>The general population still has limited knowledge about AI and ML, with 1 study reporting that only about one-quarter of people have heard of AI or ML, and only about half are at least somewhat aware of AI and ML [<xref ref-type="bibr" rid="ref6">6</xref>]. Furthermore, individuals and communities who are subject to potentially detrimental outcomes (persons with mental health care needs and disabilities, persons with marginalized racial or ethnic identities, etc) may be more aware of the potential harms of AI and ML, particularly when it comes to the risk of harm from bias [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. Thus, people who are presently or historically underserved or marginalized may be particularly concerned that they will be harmed by AI or ML technologies, especially in cases where AI or ML is used or applied without their awareness.</p>
      <p>The overall lack of understanding about AI and ML and the awareness of bias among historically and presently marginalized populations could result in limited trust in the technology and its use. To build trust among those most subject to bias or at risk of detrimental outcomes, it is critical for AI and ML developers to assess their own reliability and adapt their practices to build trustworthiness with the most vulnerable stakeholders. In this context, it is also important to recognize that trust varies across and within populations, and people may have more or less trust in health care technologies based on factors such as previous experience of racial bias [<xref ref-type="bibr" rid="ref9">9</xref>].</p>
      <p>If implemented responsibly, AI and ML have the power to account for and assess a variety of factors that contribute to health and disease to improve prevention, diagnosis, and therapy. The ability to predict the risk of adverse health outcomes and identify high-risk patients for targeted preventive interventions offers tremendous potential to improve the health of individuals and medically underserved populations [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>].</p>
      <p>A great deal of AI and ML today is developed without meaningful engagement of individuals and communities, even when those individuals and communities have (knowingly or unknowingly) generated data used by AI and ML models. When there are proactive efforts to engage communities in AI and ML design, development, or application, various factors may negatively affect how people respond (<xref ref-type="boxed-text" rid="box1">Textbox 1</xref>). For instance, failure to educate about AI and ML and contextualize its impact on an individual and their community may bias individuals’ consent to contribute data to build such technologies and, subsequently, lead to biased outcomes in terms of who benefits from the technology’s development and application. Consequently, poor engagement can exacerbate inequities in the creation, development, and application of AI and ML.</p>
      <boxed-text id="box1" position="float">
        <title>Factors that may engender inequitable access to artificial intelligence (AI) and machine learning (ML) or demotivate participation in AI and ML.</title>
        <p>
          <bold>Factors that demotivate participation in AI and ML</bold>
        </p>
        <list list-type="bullet">
          <list-item>
            <p>Cultural norms or expectations that discourage the use of AI and ML technology</p>
          </list-item>
          <list-item>
            <p>Fear and reservations that the AI and ML tool may be used to cause harm</p>
          </list-item>
          <list-item>
            <p>The history of major AI and ML–developing institutions is not inclusive of all communities, thus defying communities’ trust</p>
          </list-item>
          <list-item>
            <p>The lack of access to high-performance infrastructure and resources needed to execute AI and ML models</p>
          </list-item>
          <list-item>
            <p>The lack of interest, excitement, or perception of “hype”</p>
          </list-item>
          <list-item>
            <p>Unaddressed confusion, misinformation, or disillusionment</p>
          </list-item>
        </list>
        <p>
          <bold>Factors that exacerbate inequitable access to the benefit of AI and ML</bold>
        </p>
        <list list-type="bullet">
          <list-item>
            <p>Asymmetric ability to extract value from AI and ML</p>
          </list-item>
          <list-item>
            <p>Insufficient access to the internet, data, and data services (ie, digital divide)</p>
          </list-item>
          <list-item>
            <p>Insufficient funding or economic opportunities</p>
          </list-item>
          <list-item>
            <p>There is an intractable disagreement and power imbalance between stakeholders about how AI and ML should be used or applied</p>
          </list-item>
          <list-item>
            <p>Lack of institutional leadership or commitment</p>
          </list-item>
          <list-item>
            <p>Limited experience, knowledge, and education</p>
          </list-item>
          <list-item>
            <p>Sociocultural factors affecting digital access and inclusion</p>
          </list-item>
        </list>
      </boxed-text>
      <p>The underengagement of communities in research, development, and use of AI and ML often reflects limited knowledge and crucial misunderstandings about AI and ML, including how it is used in health care settings to advance health-related innovations and solutions. Thus, stronger, more targeted, and more intentional engagement is required to help these groups identify and address real or potential harms associated with the problematic implementation of AI and ML in high-consequence settings. To address this challenge, the US National Institutes of Health’s Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity (AIM-AHEAD) was established in 2021 with a mission to address factors that undermine achieving health equity through the design, use, and application of AI and ML, including the lack of the following:</p>
      <list list-type="bullet">
        <list-item>
          <p>An adequately diverse workforce</p>
        </list-item>
        <list-item>
          <p>Adequate data and data infrastructure</p>
        </list-item>
        <list-item>
          <p>Adequate community engagement</p>
        </list-item>
        <list-item>
          <p>Adequate oversight, governance, and accountability</p>
        </list-item>
        <list-item>
          <p>Consensus that ethics can strengthen innovation</p>
        </list-item>
      </list>
      <p>The tension between individual desires and population needs challenges ethics and equity in AI and ML settings. Thus, the Ethics and Equity Workgroup (EEWG) was formed within the AIM-AHEAD Consortium to ensure that ethics and fairness are at the forefront of AI and ML applications to build equity in biomedical research, education, and health care. Activities within the workgroup have included deliberations and discussions to develop and reach consensus on actionable guiding principles, a glossary of key terms, and other engagement tools to encourage greater attention to ethics and equity in AI and ML development. This study describes these activities with the intent to serve and inform the AIM-AHEAD community of stakeholders; external consortia, organizations, and communities that have goals similar to the AIM-AHEAD; and those interested in ethical and equitable AI and ML development and applications more broadly.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Workgroup Establishment</title>
        <p>The AIM-AHEAD EEWG was created in 2021 to guide the ethical and equitable development and implementation of AI and ML tools and processes broadly within the AIM-AHEAD. Simultaneously, an Equitable Policy Development Workgroup was developed within the AIM-AHEAD Infrastructure Core. To ensure rapid and coordinated progress with respect to embedding ethics and equity into AIM-AHEAD activities, both within and outside of the Infrastructure Core, the EEWG’s efforts were harmonized and merged with the Infrastructure Core’s Equitable Policy Development Workgroup upon recommendation by the EEWG cochair and multiple principal investigators for the AIM-AHEAD Infrastructure Core. The newly reconfigured EEWG began by defining its scope of activities (<xref rid="figure1" ref-type="fig">Figure 1</xref>).</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity (AIM-AHEAD) Ethics and Equity Workgroup’s scope of activities. AI: artificial intelligence.</p>
          </caption>
          <graphic xlink:href="ai_v2i1e52888_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Workgroup Membership</title>
        <p>At the start of the program in year 1, the EEWG was composed of 51 members (AIM-AHEAD principal investigators and coinvestigators) and 3 cochairs. AIM-AHEAD participants either requested to join or were selected to join by their project leaders within the program. During year 2, the EEWG’s membership was consolidated into 2 cochairs and approximately 40 AIM-AHEAD principal investigators, coinvestigators, leadership fellows, and research fellows. This reduction in EEWG cochairs and members occurred for two main reasons: (1) time and effort among members were reallocated to other activities within the AIM-AHEAD (administrative planning for regional hubs, research, etc), and (2) given the evolution of the program over time, the year 1 members were provided an opportunity to recommit to the EEWG for year 2. In both years, EEWG cochairs and members represented a variety of academic disciplines and focus areas, including but not limited to medicine, computational science, population health, health science, data science, bioethics, law, community engagement, human-centered design, health disparities research, biological science, social science, and engineering.</p>
      </sec>
      <sec>
        <title>Development of a Set of Ethical Principles for AI and ML</title>
        <p>The initial effort of the EEWG during year 1 was to produce a set of principles and a glossary to inform the practice of ethics and equity in AI and ML development and implementation in health research. During year 1, members convened in weekly meetings that led to consensus on the development of specific workgroup deliverables. EEWG members reviewed the literature to identify relevant sources with perspectives on ethics, equity, and social determinants of health, especially those that were community driven, and lessons that could inform the development and use of AI and ML in health disparity and disease prevention research [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref27">27</xref>].</p>
        <p>To develop the principles, the EEWG used a modified Delphi approach to facilitate discussions around tangible steps that the Consortium should take to ensure that ethics and fairness are at the forefront of AI and ML applications to build equity in biomedical research, education, and health care [<xref ref-type="bibr" rid="ref28">28</xref>]. Specifically, the EEWG engaged in weekly (year 1) and biweekly (year 2) meetings to suggest, review, and deliberate a corpus of published content and literature considered useful toward integrating ethics and equity into AI and ML development and contributed original thought leadership and content in reaction to the content and literature reviewed to devise actionable principles. The EEWG approached the development of the principles with optimism about the potential of AI and ML to address health disparities by empowering communities, yet with recognition of complex societal challenges: inadequate or misrepresentation in data sets, algorithmic bias, imbalances in communities’ access to data and information about themselves, misuses of AI and ML tools, and threats to the civil and human rights of individuals and communities who are or may be subject to illegal or pervasive AI and ML surveillance, to name just a few.</p>
      </sec>
      <sec>
        <title>Development of a Glossary</title>
        <p>To develop the glossary, during year 1, the EEWG began by defining ways in which outputs of AI and ML can (1) fail to be informative or useful for individuals and groups; (2) distinguish among individuals in inappropriate ways as a result of bias, failure of inclusion, or misuse; or (3) be poorly vetted by individuals and groups who are or may be subject to potentially harmful actions and decisions made by key or authoritative stakeholders that rely on AI and ML for decision support as a result of insufficient engagement with key stakeholders, including data participants.</p>
        <p>Using a modified Delphi approach that likewise involved polling, ranking, and other exercises, consensus was reached on terms to define [<xref ref-type="bibr" rid="ref29">29</xref>]. During its meetings, the EEWG discussed all possible terms that would be key to define to inform the ethical and equitable development and application of AI and ML, followed by 2 rounds of ranking and polling exercises to narrow their suggestions to 12 sentinel terms. Sentinel terms discussed during meetings, for example, included demographical terms such as self-defined or assigned race, ethnicity, sex, ability, and gender that can lead to errors in the development of AI and ML, which can in turn lead to potentially irreversible, intergenerational, and multigenerational harm to individuals and groups subjected to decisions informed by or based on AI and ML outputs. During year 2, remote meetings were held on a biweekly basis to further deliberate and refine the principles and glossary. Refinements were based on expert stakeholder feedback gathered through a survey among participants in the AIM-AHEAD pilot project and during remote convenings.</p>
      </sec>
      <sec>
        <title>Development of an Interview Guide</title>
        <p>The EEWG initially sought to conduct a quantitative survey to assess how AIM-AHEAD researchers would implement the principles in practice. A draft survey was developed by 2 volunteers within the workgroup, who later shared the draft survey with the broader workgroup for iterative feedback and edits during weekly (year 1) and biweekly (year 2) meetings. The draft survey was also shared with awardees of AIM-AHEAD pilot projects for feedback. As the EEWG deliberated on the feedback, it ultimately determined that a qualitative interview (vs a quantitative survey) would be a more useful approach to garnering AIM-AHEAD researchers’ perspectives on implementing the principles in practice. Thereafter, the EEWG met regularly to convert the quantitative survey into an interview guide with the intent of learning the interviewees’ perspectives and natural reactions to the AIM-AHEAD ethics and equity principles and glossary.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>The EEWG’s efforts in developing the interview guide and conducting the interviews were focused exclusively on program-specific planning for the AIM-AHEAD and were not intended as human subjects research. AIM-AHEAD investigators’ responses to the interviews were wholly voluntary, and their comments were used exclusively to develop the program’s principles and were not subject to further assessment for generalizable knowledge.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>AIM-AHEAD Ethics and Equity Principles</title>
        <sec>
          <title>Overview</title>
          <p>Based on the EEWG’s internal Delphi process, informed by insights from interviews with AIM-AHEAD investigators, the workgroup articulated 5 core principles, each with subparts, which articulate best practices for working with stakeholders from historically and presently underrepresented communities.</p>
          <list list-type="order">
            <list-item>
              <p>Build trust with communities</p>
            </list-item>
            <list-item>
              <p>Design and implement AI and ML with intention</p>
            </list-item>
            <list-item>
              <p>Cocreate, do not dictate</p>
            </list-item>
            <list-item>
              <p>Build capacity</p>
            </list-item>
            <list-item>
              <p>Reset the rules</p>
            </list-item>
          </list>
        </sec>
        <sec>
          <title>Build Trust With Communities</title>
          <p>Researchers should build trust and share power to enable data-driven decision-making among multiple partners—this must be earned through longstanding, sustained relationships in the community, which takes time, investment, and resources to manifest.</p>
          <list list-type="bullet">
            <list-item>
              <p>Through authentic community engagement, determine, understand, and deliver value in a manner that is community driven, community defined, and community led.</p>
            </list-item>
            <list-item>
              <p>Use asset-based language and thinking in collecting, interpreting, and reporting community-level data (in lieu of deficit-based language and thinking).</p>
            </list-item>
            <list-item>
              <p>Be transparent about the structure of AI models, data that are contextually limited or incomplete, and limitations in the capabilities of data analytics tools and platforms.</p>
            </list-item>
            <list-item>
              <p>Commit to ongoing engagement and bidirectional communication between AI and ML developers and communities around interventions to address limitations in the capabilities of data analytics tools and platforms.</p>
            </list-item>
          </list>
        </sec>
        <sec>
          <title>Design and Implement AI and ML With Intention</title>
          <p>Researchers should take collective action and engage in data-driven decision-making toward embedding equity, which requires shared goal setting, design, implementation, and accountability.</p>
          <list list-type="bullet">
            <list-item>
              <p>Determine shared goals that serve as a commitment anchor and barometer for cocreated actions.</p>
            </list-item>
            <list-item>
              <p>Design with intent to overcome root causes of bias to solve or address (vs merely explore) an immediate, ongoing, or systemic problem affecting communities experiencing certain hardships that have contributed to health inequity.</p>
            </list-item>
            <list-item>
              <p>Develop and implement ongoing AI and ML design mechanisms and procedures to monitor AI and ML algorithms with the goal of preventing or mitigating harm.</p>
            </list-item>
          </list>
        </sec>
        <sec>
          <title>Cocreate, Do Not Dictate</title>
          <p>Researchers should move from superficial community engagement to true community partnership through meaningful cocreation.</p>
          <list list-type="bullet">
            <list-item>
              <p>Develop AI and ML infrastructure, protocols, and programs in partnership with key and affected community stakeholders.</p>
            </list-item>
            <list-item>
              <p>Avoid tokenizing individuals and communities to achieve asymmetric goals that are or can be perceived as to the detriment of communities.</p>
            </list-item>
            <list-item>
              <p>Limit the use of computational methods that are or can be perceived as a substitution for data that would only be obtained through strong community engagement.</p>
            </list-item>
            <list-item>
              <p>Be transparent about the short-, medium-, and long-term sponsorships, investors in, and potential beneficiaries of AI and ML projects.</p>
            </list-item>
          </list>
        </sec>
        <sec>
          <title>Build Capacity</title>
          <p>Researchers should invest in people, data, and computational technology—today, as community leaders dig into this work, and tomorrow, as society collectively builds a stronger, more diverse tech talent pipeline.</p>
          <list list-type="bullet">
            <list-item>
              <p>Educate stakeholders to enable AI and ML competency across clinical practice, community, and research settings (eg, build AI and ML model fact labels that can summarize or explain algorithms).</p>
            </list-item>
            <list-item>
              <p>Develop a plan to promote eHealth literacy in marginalized and underserved communities and groups.</p>
            </list-item>
            <list-item>
              <p>Build equitable access to AI and ML technology, its development, applications, and uses across real-world health contexts including social determinants of health and research.</p>
            </list-item>
            <list-item>
              <p>Develop a plan for building capacity that includes hiring and supporting a diverse workforce, dedicating funds for sustaining an existing workforce, and creating metrics that allow institutions to measure their success.</p>
            </list-item>
          </list>
        </sec>
        <sec>
          <title>Reset the Rules</title>
          <p>Researchers should reexamine the mechanisms that hold institutions accountable and resist the urgency of quick fixes to complex issues like systemic racism.</p>
          <list list-type="bullet">
            <list-item>
              <p>Engage communities to determine their experiences with and desires to overcome the digital divide and facilitate the equitable inclusion and consideration of populations in AI and ML models and algorithms.</p>
            </list-item>
            <list-item>
              <p>Create equitable and liberated access to AI and ML development, implementation, and maintenance to oversee and correct model drift and guide entities in their reactions to AI and ML outputs.</p>
            </list-item>
            <list-item>
              <p>Identify and correct information asymmetries that may lead to communities’ lacking pertinent, actionable, and critical information that is exclusively held by powerful institutions.</p>
            </list-item>
          </list>
        </sec>
      </sec>
      <sec>
        <title>AIM-AHEAD Ethics and Equity Glossary Terms</title>
        <p>Developers of AI and ML platforms and tools must contemplate, anticipate, mitigate, and address potential issues with downstream data aggregation, interpretation, and use. Meeting these goals requires a shared understanding of the terms used in these policies and processes. The EEWG determined that, in many cases, sensitive demographic characteristics (eg, race, ethnicity, sex, ability, and gender) are particularly problematic as variables used in AI and ML because they are often inappropriately understood as being rooted solely or primarily in genetic or phenotypic differences rather than strongly influenced by discriminatory sociohistorical and sociocultural practices.</p>
        <p>To capture and promote a shared understanding of key terms, the EEWG developed a glossary of 12 words (<xref ref-type="table" rid="table1">Table 1</xref>) out of 28 considered that follow or build upon existing understandings of these concepts, highlighting their particular importance for the optimal development, refinement, and implementation of AI and ML.</p>
        <p>In addition, the EEWG developed a concept relationship diagram that describes the logical flow of and relationship between the definitional concepts described in <xref ref-type="table" rid="table1">Table 1</xref> and <xref rid="figure2" ref-type="fig">Figure 2</xref>. The center of this diagram is equity, which requires AI developers and implementers to enforce fairness and avoid bias in a population with sufficient diversity by being inclusive. To implement diversity, representatives that are characterized by a minimal set of aspects—ethnicity, race, gender, and sexual orientation—need to be collected. They will form a representative sample if they can reflect the characteristics of a population. A representative sample can mitigate algorithmic bias, which is one specific type of bias.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity (AIM-AHEAD) ethics and equity glossary terms and definitions.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="60"/>
            <col width="220"/>
            <col width="720"/>
            <thead>
              <tr valign="top">
                <td>No</td>
                <td>Glossary term</td>
                <td>AIM-AHEAD definition</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>1</td>
                <td>Ethnicity</td>
                <td>Distinct patterns of language, lifestyle, illness, and health beliefs encountered among an individual or representative population, regardless of race, and that may subject the individual or population to bias or discrimination.</td>
              </tr>
              <tr valign="top">
                <td>2</td>
                <td>Race</td>
                <td>A social construct or assumption based on patterns in an individual’s or representative population’s language, lifestyle, and health beliefs and immutable characteristics, such as skin tone, color, or hair texture, regardless of immigration status, socioeconomic status, genetic ancestry, or geographic origin, that may subject the individual or population to bias, structural racism, or discrimination that would warrant corrective antiracism actions.</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>Bias</td>
                <td>Systematic error in information originating, gathering, or assessment activities, leading to selecting or encouraging one outcome or answer over others, which can result in human decisions and values that echo societal or historical inequities and produce inconclusive or limited assumptions about the broader population.</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>Equity</td>
                <td>Equity is fairness and justice in policy, practice, and opportunity designed to address the distinct challenges of nondominant social groups with an eye to progressive outcomes. Health equity is the state in which everyone has the opportunity to attain full health potential, and no individual is disadvantaged from achieving this potential because of social position or any other socially defined circumstance.</td>
              </tr>
              <tr valign="top">
                <td>5</td>
                <td>Algorithmic bias</td>
                <td>Systematic and repeated errors in the collection and consideration of a variety of factors, including but not limited to the design of the algorithm; unintended or unanticipated use or decisions relating to the way data are collected, represented, or used; lack of sensitivity to identity factors that contribute to bias in the evaluation of the algorithm, or misappropriation of the algorithm through miscommunicating or misunderstanding its limitations.</td>
              </tr>
              <tr valign="top">
                <td>6</td>
                <td>Diversity</td>
                <td>The wide variety of shared and different personal and group characteristics among human beings. There are many kinds of diversity, including gender, sexual orientation, class, age, country of origin, education, religion, geography, physical or cognitive abilities, or other characteristics. Valuing diversity means recognizing differences between people, acknowledging that these differences are a valued asset, and striving for diverse representation as a critical step toward equity.</td>
              </tr>
              <tr valign="top">
                <td>7</td>
                <td>Inclusive</td>
                <td>Avoiding bias by providing equitable and open access to opportunities and resources for engagement. This can be accomplished, for example, by enforcing fairness in the data collection methods, enforcing fairness in the assignment of labels, developing explainable, transparent, and interpretable models, having diverse teams monitor models, and looking for biases and eliminating them.</td>
              </tr>
              <tr valign="top">
                <td>8</td>
                <td>Fairness</td>
                <td>Intent to promote nondiscrimination and population representation when assessing a group’s eligibility for a benefit or penalty. This is particularly important given the statistical likelihood that artificial intelligence and machine learning systems could produce discriminatory outputs once algorithms are implemented across one or more data sets.</td>
              </tr>
              <tr valign="top">
                <td>9</td>
                <td>Representative</td>
                <td>An individual or body chosen or appointed to act or speak for an individual, population, or subpopulation sharing a set of features or characteristics, including but not limited to gender, race, or sexual orientation.</td>
              </tr>
              <tr valign="top">
                <td>10</td>
                <td>Representative sample</td>
                <td>A subset of a population that reflects the characteristics of the entire population from which it has been selected.</td>
              </tr>
              <tr valign="top">
                <td>11</td>
                <td>Gender identity</td>
                <td>An individual’s sense of oneself as male, female, or something else. When an individual’s gender identity and biological sex are not congruent, the individual may identify along the transgender spectrum. An individual may choose to change their gender one or more times. Varying cultural indicators of gender, such as clothing choice, speech patterns, and personality traits, relate to gender but are not acceptable means to determine another’s gender identity. The change in an individual’s gender can be used to abuse, discriminate against, and misrepresent individuals and groups.</td>
              </tr>
              <tr valign="top">
                <td>12</td>
                <td>Sexual orientation</td>
                <td>An individual’s capacity for attraction to and sexual activity with the same or different sex. An individual’s sexual orientation is indicated by one or more of the following: how an individual identifies their own sexual orientation, an individual’s capacity for experiencing sexual and affectional attraction to people of the same or different gender, and an individual’s sexual behavior with people of the same or different gender. Sexual orientation incorporates three core ideas: consensual human relationships—sexual, romantic, or both—the biological sex of an individual’s actual or potential relationship partners, and enduring patterns of experience and behavior. Sexual minorities, or people whose sexual orientation does not conform to heteronormative cultural expectations, are vulnerable to violence and discrimination.</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Definitional concepts of Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity (AIM-AHEAD) ethics and equity glossary terms.</p>
          </caption>
          <graphic xlink:href="ai_v2i1e52888_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Interview Guide</title>
        <p>As mentioned, extensive and iterative feedback received during the development of the quantitative survey led the EEWG cochairs and members to determine that a qualitative engagement approach is warranted to facilitate meaningful and diverse stakeholder engagement to disseminate and facilitate implementation of the principles and glossary. Therefore, the EEWG developed an interview guide that can be used or adapted to garner and understand AIM-AHEAD members’ and other community perspectives on the principles and glossary. The interview guide is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Overview</title>
        <p>The role of those who will be affected by the findings of the research enterprise has evolved from their initial role as objects, as illustrated in the iconic painting of Edward Jenner administering the life-saving inoculation of the English boy with cowpox in 1796, the multiepisode television documentary “Microbes and Men,” and the abuses of Black men in the US Public Health Service Study of the natural history of untreated syphilis at Tuskegee [<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref33">33</xref>]. Over time, more attention has been devoted to assessing the potential harms and benefits of research to the people who are studied, albeit primarily as viewed by investigators, typically White men, and institutional review boards, typically composed of researchers with minimal or latent community involvement. Incentivizing representation of nonscientific, nonaffiliate community members on institutional review boards, engaging members of historically underrepresented groups in more visible roles as investigators, and engaging minority-serving institutions as partners in AI and ML research are necessary to promote equitable access to opportunities and careers in AI and ML. Such an intentional approach also, importantly, demonstrates an appreciation for local knowledge and facilitates the design of more culturally informed interventions that consider how research will affect heterogeneous populations being studied in AI and ML research. This form of appreciation is necessary for tailoring engagement to the needs of diverse groups and understanding how to overcome barriers to AI and ML research and use [<xref ref-type="bibr" rid="ref34">34</xref>].</p>
        <p>Beyond promoting diverse and equitable opportunities for participation in AI and ML research, it is necessary to recognize the need to translate that work into actual practice, which historically has also been a barrier to health equity. For example, the association of the lower-quality data measured by pulse oximetry with dark skin tones has long been known, and there have been versions of the technology designed to account for this discrepancy, but versions of pulse oximeters with biased tendencies remain in wide use [<xref ref-type="bibr" rid="ref35">35</xref>]. There is a real risk that AI and ML technology will follow a similar pathway if there is not sufficient action to build ethics and equity into the research.</p>
        <p>Overall, our effort reported here achieves 2 goals. The first is to describe what is needed procedurally and substantively to achieve equity. This is a complex process that must take place and evolve over time. It cannot be addressed as a 1-time event or by filling out a checklist. Achieving equity requires rebalancing the interests at stake in research, which, at a minimum, means truly considering and addressing the interests of the people who will be affected by the results. Ideally, research participants can become cocreators as ethics in AI and ML and related ethical principles evolve into more commonly accepted policies and practices. The second goal of this reported effort is to emphasize that addressing equity requires an inclusive, ongoing process with a shared understanding of salient terms that will evolve over time. Recent engagements within the AIM-AHEAD program have noted this to be true even for terms like AI and ML, as today very few stakeholders have been able to clearly articulate how AI and ML can be or is used in the real world [<xref ref-type="bibr" rid="ref34">34</xref>]. New and ongoing national initiatives, such as the National Academy of Medicine’s AI Code of Conduct project, which intends to develop a “code of conduct for the development and use of AI in health, medical care, and health research,” are encouraged to learn from the EEWG’s efforts [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
        <p>Our work builds on and can be incorporated into current AI and ML ethics and equity frameworks and policies within and outside of the United States, focused on improving population health through broad community involvement in AI and ML application development [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref38">38</xref>]. This includes, but is not limited to, the National Institutes of Health’s policies and programs on AI and ML application development in health research; policy developments undertaken by the US Senate Health, Education, Labor, and Pensions Committee; the National Academy of Medicine’s Artificial Intelligence Code of Conduct project; the European Commission’s Guidelines for Trustworthy AI; Asilomar AI Principles; and lastly and importantly, a groundbreaking and recent US White House Executive Order explicitly supporting the mission of the AIM-AHEAD [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        <p>Importantly, our work provides a complementary, fundamental, and basic blueprint or process, along with operational tools and building blocks, to educate stakeholders on this practice of creating safe spaces and setting culture tones for diverse stakeholder engagement and consensus around best practices and shared terminology. Also importantly, our tools enable the collection of ongoing and iterative feedback concerning the local implementation of our principles and glossary. Iterations may be further disseminated, along with public-facing endorsements of the principles and glossary in their current form, by like-minded stakeholders seeking to ensure that researcher diversity, community, and social justice concerns influence AI and ML application development processes in health research and, broadly, science and technology.</p>
        <p>Inclusive and ongoing processes to develop a shared understanding of salient terms like AI and ML and those described in our glossary require more time, greater inclusion, and deeper incorporation of diverse community perspectives. This approach differs drastically from the typical project life cycles afforded by the gold rush mentality that has emerged with AI and ML today. Therefore, one key step, moving forward, would be to persuade leaders in the AI and ML research enterprise to broadly disseminate the lessons that may be learned in operationalizing our EEWG principles and glossary. Programs such as the AIM-AHEAD need to objectively assess their administrative processes and evaluation criteria for what constitutes ethical and equitable opportunities for an AI and ML investigation, including investigator inclusion, data governance, data sources, and data infrastructure.</p>
        <p>There are limitations to consider in our process and recommendations. First, the EEWG has continuously revisited the principles and glossary for potential editing based on the members’ evolving experience and expert opinions, even though making these deliverables “living documents” complicates the process of achieving sustainable consensus. Nonetheless, the principles and glossary will require reflection, appreciation, and adjustments over time to account for the effects of real-world events, human choices, or interpersonal phenomena from relevant perspectives. Also, some of our proposed glossary terms may already be limited in scope with respect to real-world events and phenomena. For instance, although our definition of “representative” concerns “an individual or body chosen or appointed to act or speak for an individual, population, or subpopulation,” there are certain matters in which a representative may be self-appointed without specific authorization from those they wish to represent.</p>
        <p>Therefore, ongoing engagement around the use of our principles and glossary in AI and ML research settings is encouraged to maximize their potential benefits and minimize any potential harm. However, ongoing engagement with institutions that have limited resources to support their full participation requires careful consideration and discussion of how to incentivize, support, and sustain meaningful engagement beyond mere compensation. One way to accomplish this is to seek institutional input through authentic connections to determine what they consider a valuable investment for their time, instead of deciding for them. For example, such connections can be made both within and outside of conferences, convenings, and events hosted by minority-serving institutions nationwide (eg, the Annual Biomedical Research Conference for Minoritized Scientists or the National Society of Black Engineers’ Annual Convention).</p>
      </sec>
      <sec>
        <title>Conclusions and Next Steps</title>
        <p>An overemphasis on speed or velocity works against taking the time needed to foster the inclusion of historically and presently underrepresented communities in the development of AI and ML, ultimately rewarding AI and ML “haves” over “have-nots.” In the private sector (eg, big technology companies and startups), the pace of AI and ML development is extremely rapid and difficult to manage. Inequitable divisions in access to resources like computers, smartphones, and the internet have vastly decreased over the past decade. Yet, AI and ML technology that is used with adequate operational know-how and e-literacy, cost of use, human resources and staffing needs to maintain cyberinfrastructure, and many other technical and nontechnical resources, is where these inequitable divisions can be addressed.</p>
        <p>An equity-oriented public sector intervention, such as the AIM-AHEAD, can be more effective in achieving diversity and inclusion goals by emphasizing actions that do not sacrifice trust-building for the sake of rapid development of technology, especially in the initial stages. By slowing down to meet historically and presently underresourced institutions and communities where they are and where they are capable of engaging and competing, we can more effectively evaluate AI and ML implementation and results for bias over time and expand the potential to achieve the aims of ethics and equity. We envision a virtuous cycle of shared learning, building on our EEWG deliverables, that may bridge researchers and impacted communities into a new intersection of computational sciences, ethics, and health equity.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Interview guide.</p>
        <media xlink:href="ai_v2i1e52888_app1.docx" xlink:title="DOCX File, 15 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AIM-AHEAD</term>
          <def>
            <p>Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">EEWG</term>
          <def>
            <p>Ethics and Equity Workgroup</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">ML</term>
          <def>
            <p>machine learning</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The authors would like to acknowledge Artificial Intelligence/Machine Learning Consortium to Advance Health Equity and Researcher Diversity (AIM-AHEAD) Ethics and Equity Workgroup (EEWG) members engaged during years 1 and 2 of the consortium.</p>
      <p>The activities reported in this publication were supported by the Office of the Director, National Institutes of Health Common Fund, under award 1OT2OD032581. The funders had no role in study design, data collection and analysis, the decision to publish, or the preparation of the manuscript. More information can be found on the official website [<xref ref-type="bibr" rid="ref43">43</xref>].</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>RHS is employed by the Duke-Margolis Center for Health Policy. BM is the Coeditor in Chief of JMIR AI but was excluded from the review of the manuscript. The other authors declare that they have no conflicts of interest.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayers</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Poliak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dredze</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Leas</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Kelley</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Faix</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Longhurst</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Hogarth</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title>
          <source>JAMA Intern Med</source>
          <year>2023</year>
          <month>06</month>
          <day>01</day>
          <volume>183</volume>
          <issue>6</issue>
          <fpage>589</fpage>
          <lpage>596</lpage>
          <pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id>
          <pub-id pub-id-type="medline">37115527</pub-id>
          <pub-id pub-id-type="pii">2804309</pub-id>
          <pub-id pub-id-type="pmcid">PMC10148230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Google is training its generative AI to analyze medical images—and talk to doctors about them</article-title>
          <source>STAT</source>
          <year>2023</year>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.statnews.com/2023/05/10/google-artificial-intelligence-ai-medpalm2-health/">https://www.statnews.com/2023/05/10/google-artificial-intelligence-ai-medpalm2-health/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vyas</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Eisenstein</surname>
              <given-names>LG</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>DS</given-names>
            </name>
          </person-group>
          <article-title>Hidden in plain sight—reconsidering the use of race correction in clinical algorithms</article-title>
          <source>N Engl J Med</source>
          <year>2020</year>
          <volume>383</volume>
          <issue>9</issue>
          <fpage>874</fpage>
          <lpage>882</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.nejm.org/doi/10.1056/NEJMms2004740"/>
          </comment>
          <pub-id pub-id-type="doi">10.1056/NEJMms2004740</pub-id>
          <pub-id pub-id-type="medline">32853499</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Doshi</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Bajaj</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Promises—and pitfalls—of ChatGPT-assisted medicine</article-title>
          <source>STAT</source>
          <year>2023</year>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.statnews.com/2023/02/01/promises-pitfalls-chatgpt-assisted-medicine/">https://www.statnews.com/2023/02/01/promises-pitfalls-chatgpt-assisted-medicine/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Castillo</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Tools to predict stroke risk work less well for Black patients, study finds</article-title>
          <source>STAT</source>
          <year>2023</year>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.statnews.com/2023/02/22/stroke-risk-machine-learning-models/">https://www.statnews.com/2023/02/22/stroke-risk-machine-learning-models/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aggarwal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Farag</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ashrafian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Darzi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Patient perceptions on data sharing and applying artificial intelligence to health care data: cross-sectional survey</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>8</issue>
          <fpage>e26162</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/8/e26162/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/26162</pub-id>
          <pub-id pub-id-type="medline">34236994</pub-id>
          <pub-id pub-id-type="pii">v23i8e26162</pub-id>
          <pub-id pub-id-type="pmcid">PMC8430862</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Timmons</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Duong</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Simo Fiallo</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Vo</surname>
              <given-names>HPQ</given-names>
            </name>
            <name name-style="western">
              <surname>Ahle</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Comer</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Brewer</surname>
              <given-names>LC</given-names>
            </name>
            <name name-style="western">
              <surname>Frazier</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Chaspari</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A call to action on assessing and mitigating bias in artificial intelligence applications for mental health</article-title>
          <source>Perspect Psychol Sci</source>
          <year>2023</year>
          <volume>18</volume>
          <issue>5</issue>
          <fpage>1062</fpage>
          <lpage>1096</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36490369"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/17456916221134490</pub-id>
          <pub-id pub-id-type="medline">36490369</pub-id>
          <pub-id pub-id-type="pmcid">PMC10250563</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rich</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Who is included in human perceptions of AI?: trust and perceived fairness around healthcare AI and cultural mistrust</article-title>
          <year>2021</year>
          <conf-name>CHI '21: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>May 8-13, 2021</conf-date>
          <conf-loc>Yokohama, Japan</conf-loc>
          <publisher-loc>New York, NY, US</publisher-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.1145/3411764.3445570</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>SS</given-names>
            </name>
          </person-group>
          <article-title>Race and trust</article-title>
          <source>Annu Rev Sociol</source>
          <year>2010</year>
          <volume>36</volume>
          <issue>1</issue>
          <fpage>453</fpage>
          <lpage>475</lpage>
          <pub-id pub-id-type="doi">10.1146/annurev.soc.012809.102526</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hai</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Weiner</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Paranjape</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Livshits</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Obradovic</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Rubin</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>Deep learning vs traditional models for predicting hospital readmission among patients with diabetes</article-title>
          <source>AMIA Annu Symp Proc</source>
          <year>2022</year>
          <volume>2022</volume>
          <fpage>512</fpage>
          <lpage>521</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37128461"/>
          </comment>
          <pub-id pub-id-type="medline">37128461</pub-id>
          <pub-id pub-id-type="pii">1150</pub-id>
          <pub-id pub-id-type="pmcid">PMC10148287</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rubin</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Gogineni</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Deak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vaz</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Watts</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Recco</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Dillard</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Karunakaran</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kondamuri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Naylor</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Golden</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Allen</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The Diabetes Transition of Hospital Care (DiaTOHC) pilot study: a randomized controlled trial of an intervention designed to reduce readmission risk of adults with diabetes</article-title>
          <source>J Clin Med</source>
          <year>2022</year>
          <volume>11</volume>
          <issue>6</issue>
          <fpage>1471</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm11061471"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm11061471</pub-id>
          <pub-id pub-id-type="medline">35329797</pub-id>
          <pub-id pub-id-type="pii">jcm11061471</pub-id>
          <pub-id pub-id-type="pmcid">PMC8949063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cerrato</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Halamka</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pencina</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A proposal for developing a platform that evaluates algorithmic equity and accuracy</article-title>
          <source>BMJ Health Care Inform</source>
          <year>2022</year>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>e100423</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35410952"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmjhci-2021-100423</pub-id>
          <pub-id pub-id-type="medline">35410952</pub-id>
          <pub-id pub-id-type="pii">bmjhci-2021-100423</pub-id>
          <pub-id pub-id-type="pmcid">PMC9003600</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="web">
          <article-title>Rising Equitable Community Data Ecosystems (RECoDE)</article-title>
          <source>data.org</source>
          <year>2022</year>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://data.org/reports/recode-report/">https://data.org/reports/recode-report/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <article-title>The proliferation of AI ethics principles: what's next?</article-title>
          <source>Montreal AI Ethics Institute</source>
          <year>2021</year>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://montrealethics.ai/the-proliferation-of-ai-ethics-principles-whats-next/">https://montrealethics.ai/the-proliferation-of-ai-ethics-principles-whats-next/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rajkomar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hardt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Howell</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Chin</surname>
              <given-names>MH</given-names>
            </name>
          </person-group>
          <article-title>Ensuring fairness in machine learning to advance health equity</article-title>
          <source>Ann Intern Med</source>
          <year>2018</year>
          <volume>169</volume>
          <issue>12</issue>
          <fpage>866</fpage>
          <lpage>872</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30508424"/>
          </comment>
          <pub-id pub-id-type="doi">10.7326/M18-1990</pub-id>
          <pub-id pub-id-type="medline">30508424</pub-id>
          <pub-id pub-id-type="pii">2717119</pub-id>
          <pub-id pub-id-type="pmcid">PMC6594166</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wiens</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Saria</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sendak</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>VX</given-names>
            </name>
            <name name-style="western">
              <surname>Doshi-Velez</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Heller</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kale</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Saeed</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ossorio</surname>
              <given-names>PN</given-names>
            </name>
            <name name-style="western">
              <surname>Thadaney-Israni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Goldenberg</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Do no harm: a roadmap for responsible machine learning for health care</article-title>
          <source>Nat Med</source>
          <year>2019</year>
          <volume>25</volume>
          <issue>9</issue>
          <fpage>1337</fpage>
          <lpage>1340</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-019-0548-6</pub-id>
          <pub-id pub-id-type="medline">31427808</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-019-0548-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dankwa-Mullan</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Scheufele</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Matheny</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Quintana</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chapman</surname>
              <given-names>WW</given-names>
            </name>
            <name name-style="western">
              <surname>Jackson</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>South</surname>
              <given-names>BR</given-names>
            </name>
          </person-group>
          <article-title>A proposed framework on integrating health equity and racial justice into the artificial intelligence development lifecycle</article-title>
          <source>J Health Care Poor Underserved</source>
          <year>2021</year>
          <volume>32</volume>
          <issue>2</issue>
          <fpage>300</fpage>
          <lpage>317</lpage>
          <pub-id pub-id-type="doi">10.1353/hpu.2021.0065</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vayena</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Blasimme</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>IG</given-names>
            </name>
          </person-group>
          <article-title>Machine learning in medicine: addressing ethical challenges</article-title>
          <source>PLoS Med</source>
          <year>2018</year>
          <volume>15</volume>
          <issue>11</issue>
          <fpage>e1002689</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.plos.org/plosmedicine/article?id=10.1371/journal.pmed.1002689"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pmed.1002689</pub-id>
          <pub-id pub-id-type="medline">30399149</pub-id>
          <pub-id pub-id-type="pii">PMEDICINE-D-18-03354</pub-id>
          <pub-id pub-id-type="pmcid">PMC6219763</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lurie</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Mulligan</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Reconfiguring diversity and inclusion for AI ethics</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society</conf-name>
          <conf-date>May 19-21, 2021</conf-date>
          <conf-loc>USA</conf-loc>
          <fpage>447</fpage>
          <lpage>457</lpage>
          <pub-id pub-id-type="doi">10.1145/3461702.3462622</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karnik</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>Afshar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Churpek</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Nunez-Smith</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Structural disparities in data science: a prolegomenon for the future of machine learning</article-title>
          <source>Am J Bioeth</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>11</issue>
          <fpage>35</fpage>
          <lpage>37</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33103976"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/15265161.2020.1820102</pub-id>
          <pub-id pub-id-type="medline">33103976</pub-id>
          <pub-id pub-id-type="pmcid">PMC7695219</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="web">
          <article-title>UNESCO member states adopt the first ever global agreement on the ethics of artificial intelligence</article-title>
          <source>UNESCO</source>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.unesco.org/en/articles/unesco-member-states-adopt-first-ever-global-agreement-ethics-artificial-intelligence">https://www.unesco.org/en/articles/unesco-member-states-adopt-first-ever-global-agreement-ethics-artificial-intelligence</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McLennan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Fiske</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Celi</surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>AI ethics is not a panacea</article-title>
          <source>Am J Bioeth</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>11</issue>
          <fpage>20</fpage>
          <lpage>22</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33103983"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/15265161.2020.1819470</pub-id>
          <pub-id pub-id-type="medline">33103983</pub-id>
          <pub-id pub-id-type="pmcid">PMC8034825</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jobin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ienca</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vayena</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>The global landscape of AI ethics guidelines</article-title>
          <source>Nat Mach Intell</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>9</issue>
          <fpage>389</fpage>
          <lpage>399</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <source>Future of Privacy Forum resources—ethics, governance, and compliance resources</source>
          <access-date>2023-07-30</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://sites.google.com/fpf.org/futureofprivacyforumresources/ethics-governance-and-compliance-resources">https://sites.google.com/fpf.org/futureofprivacyforumresources/ethics-governance-and-compliance-resources</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Machado</surname>
              <given-names>CCV</given-names>
            </name>
            <name name-style="western">
              <surname>Burr</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cowls</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Taddeo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The ethics of AI in health care: a mapping review</article-title>
          <source>Soc Sci Med</source>
          <year>2020</year>
          <volume>260</volume>
          <fpage>113172</fpage>
          <pub-id pub-id-type="doi">10.1016/j.socscimed.2020.113172</pub-id>
          <pub-id pub-id-type="medline">32702587</pub-id>
          <pub-id pub-id-type="pii">S0277-9536(20)30391-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wilkins</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Effective engagement requires trust and being trustworthy</article-title>
          <source>Med Care</source>
          <year>2018</year>
          <volume>56</volume>
          <issue>10 Suppl 1</issue>
          <fpage>S6</fpage>
          <lpage>S8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30015725"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/MLR.0000000000000953</pub-id>
          <pub-id pub-id-type="medline">30015725</pub-id>
          <pub-id pub-id-type="pmcid">PMC6143205</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Glover</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hendricks-Sturrup</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Maier</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Oehmen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Vermaas</surname>
              <given-names>PE</given-names>
            </name>
          </person-group>
          <article-title>Ethics and equity-centred perspectives in engineering systems design</article-title>
          <source>Handbook of Engineering Systems Design</source>
          <year>2022</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>1</fpage>
          <lpage>24</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dalkey</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>An experimental study of group opinion</article-title>
          <source>Futures</source>
          <year>1969</year>
          <volume>1</volume>
          <issue>5</issue>
          <fpage>408</fpage>
          <lpage>426</lpage>
          <pub-id pub-id-type="doi">10.1016/s0016-3287(69)80025-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasa</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Juneja</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Delphi methodology in healthcare research: how to decide its appropriateness</article-title>
          <source>World J Methodol</source>
          <year>2021</year>
          <volume>11</volume>
          <issue>4</issue>
          <fpage>116</fpage>
          <lpage>129</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.wjgnet.com/2222-0682/full/v11/i4/116.htm"/>
          </comment>
          <pub-id pub-id-type="doi">10.5662/wjm.v11.i4.116</pub-id>
          <pub-id pub-id-type="medline">34322364</pub-id>
          <pub-id pub-id-type="pmcid">PMC8299905</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Riedel</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Edward Jenner and the history of smallpox and vaccination</article-title>
          <source>Proc (Bayl Univ Med Cent)</source>
          <year>2005</year>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>21</fpage>
          <lpage>25</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/16200144"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/08998280.2005.11928028</pub-id>
          <pub-id pub-id-type="medline">16200144</pub-id>
          <pub-id pub-id-type="pmcid">PMC1200696</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Strickler</surname>
              <given-names>DA</given-names>
            </name>
          </person-group>
          <article-title>Microbes and men</article-title>
          <source>JAMA</source>
          <year>1916</year>
          <volume>LXVI</volume>
          <issue>1</issue>
          <fpage>52</fpage>
          <pub-id pub-id-type="doi">10.1001/jama.1916.02580270056031</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brandt</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Racism and research: the case of the Tuskegee Syphilis study</article-title>
          <source>Hastings Cent Rep</source>
          <year>1978</year>
          <volume>8</volume>
          <issue>6</issue>
          <fpage>21</fpage>
          <lpage>29</lpage>
          <pub-id pub-id-type="doi">10.2307/3561468</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <article-title>The untreated syphilis study at Tuskegee timeline</article-title>
          <source>Centers for Disease Control and Prevention</source>
          <year>2022</year>
          <access-date>2023-08-27</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cdc.gov/tuskegee/timeline.htm">https://www.cdc.gov/tuskegee/timeline.htm</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vishwanatha</surname>
              <given-names>JK</given-names>
            </name>
            <name name-style="western">
              <surname>Christian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sambamoorthi</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Stinson</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Syed</surname>
              <given-names>TA</given-names>
            </name>
          </person-group>
          <article-title>Community perspectives on AI/ML and health equity: AIM-AHEAD nationwide stakeholder listening sessions</article-title>
          <source>PLOS Digit Health</source>
          <year>2023</year>
          <volume>2</volume>
          <issue>6</issue>
          <fpage>e0000288</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37390116"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000288</pub-id>
          <pub-id pub-id-type="medline">37390116</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-22-00316</pub-id>
          <pub-id pub-id-type="pmcid">PMC10313007</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zou</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schiebinger</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Ensuring that biomedical AI benefits diverse populations</article-title>
          <source>EBioMedicine</source>
          <year>2021</year>
          <volume>67</volume>
          <fpage>103358</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2352-3964(21)00151-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ebiom.2021.103358</pub-id>
          <pub-id pub-id-type="medline">33962897</pub-id>
          <pub-id pub-id-type="pii">S2352-3964(21)00151-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC8176083</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="web">
          <article-title>Health care artificial intelligence code of conduct</article-title>
          <source>National Academy of Medicine</source>
          <access-date>2023-07-31</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://nam.edu/programs/value-science-driven-health-care/health-care-artificial-intelligence-code-of-conduct/">https://nam.edu/programs/value-science-driven-health-care/health-care-artificial-intelligence-code-of-conduct/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="web">
          <article-title>Advancing health care AI through ethics, evidence and equity</article-title>
          <source>American Medical Association</source>
          <year>2023</year>
          <access-date>2023-10-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ama-assn.org/practice-management/digital/advancing-health-care-ai-through-ethics-evidence-and-equity">https://www.ama-assn.org/practice-management/digital/advancing-health-care-ai-through-ethics-evidence-and-equity</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Berdahl</surname>
              <given-names>CT</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Osoba</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Girosi</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Strategies to improve the impact of artificial intelligence on health equity: scoping review</article-title>
          <source>JMIR AI</source>
          <year>2023</year>
          <volume>2</volume>
          <issue>1</issue>
          <fpage>e42936</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ai.jmir.org/2023/1/e42936"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="web">
          <article-title>Ranking member Cassidy releases white paper on artificial intelligence</article-title>
          <source>The U.S. Senate Committee on Health, Education, Labor &#38; Pensions</source>
          <year>2023</year>
          <access-date>2023-10-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.help.senate.gov/ranking/newsroom/press/ranking-member-cassidy-releases-white-paper-on-artificial-intelligence">https://www.help.senate.gov/ranking/newsroom/press/ranking-member-cassidy-releases-white-paper-on-artificial-intelligence</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="web">
          <article-title>Ethics guidelines for trustworthy AI</article-title>
          <source>European Commission: Shaping Europe's digital future</source>
          <year>2019</year>
          <access-date>2023-10-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai">https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <article-title>AI principles</article-title>
          <source>Future of Life Institute</source>
          <year>2017</year>
          <access-date>2023-10-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://futureoflife.org/open-letter/ai-principles/">https://futureoflife.org/open-letter/ai-principles/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <article-title>Executive order on the safe, secure, and trustworthy development and use of artificial intelligence</article-title>
          <source>The White House</source>
          <year>2023</year>
          <access-date>2023-10-31</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.whitehouse.gov/briefing-room/presidential-actions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence/">https://www.whitehouse.gov/briefing-room/presidential-actions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="web">
          <article-title>AIM-AHEAD</article-title>
          <source>National Institutes of Health: Office of Data Science Strategy</source>
          <access-date>2023-11-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://datascience.nih.gov/artificial-intelligence/aim-ahead">https://datascience.nih.gov/artificial-intelligence/aim-ahead</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
