<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR AI</journal-id>
      <journal-title>JMIR AI</journal-title>
      <issn pub-type="epub">2817-1705</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v3i1e53505</article-id>
      <article-id pub-id-type="pmid">39405099</article-id>
      <article-id pub-id-type="doi">10.2196/53505</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>The Dual Nature of AI in Information Dissemination: Ethical Considerations</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>El Emam</surname>
            <given-names>Khaled</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Malin</surname>
            <given-names>Bradley</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Blasimme</surname>
            <given-names>Alessandro</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Pertwee</surname>
            <given-names>Ed</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Gordon</surname>
            <given-names>Stuart</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Wilhelm</surname>
            <given-names>Elisabeth</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Germani</surname>
            <given-names>Federico</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5604-0437</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Spitale</surname>
            <given-names>Giovanni</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6812-0979</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Biller-Andorno</surname>
            <given-names>Nikola</given-names>
          </name>
          <degrees>MD, MHBA, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Institute of Biomedical Ethics and History of Medicine</institution>
            <institution>University of Zurich, Switzerland</institution>
            <addr-line>Winterthurerstrasse 30</addr-line>
            <addr-line>Zurich, 8006</addr-line>
            <country>Switzerland</country>
            <phone>41 44 634 40 81</phone>
            <email>biller-andorno@ibme.uzh.ch</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7661-1324</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Institute of Biomedical Ethics and History of Medicine</institution>
        <institution>University of Zurich, Switzerland</institution>
        <addr-line>Zurich</addr-line>
        <country>Switzerland</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Nikola Biller-Andorno <email>biller-andorno@ibme.uzh.ch</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>15</day>
        <month>10</month>
        <year>2024</year>
      </pub-date>
      <volume>3</volume>
      <elocation-id>e53505</elocation-id>
      <history>
        <date date-type="received">
          <day>9</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>13</day>
          <month>1</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>22</day>
          <month>1</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>28</day>
          <month>7</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Federico Germani, Giovanni Spitale, Nikola Biller-Andorno. Originally published in JMIR AI (https://ai.jmir.org), 15.10.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on https://www.ai.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://ai.jmir.org/2024/1/e53505" xlink:type="simple"/>
      <abstract>
        <p>Infodemics pose significant dangers to public health and to the societal fabric, as the spread of misinformation can have far-reaching consequences. While artificial intelligence (AI) systems have the potential to craft compelling and valuable information campaigns with positive repercussions for public health and democracy, concerns have arisen regarding the potential use of AI systems to generate convincing disinformation. The consequences of this dual nature of AI, capable of both illuminating and obscuring the information landscape, are complex and multifaceted. We contend that the rapid integration of AI into society demands a comprehensive understanding of its ethical implications and the development of strategies to harness its potential for the greater good while mitigating harm. Thus, in this paper we explore the ethical dimensions of AI’s role in information dissemination and impact on public health, arguing that potential strategies to deal with AI and disinformation encompass generating regulated and transparent data sets used to train AI models, regulating content outputs, and promoting information literacy.</p>
      </abstract>
      <kwd-group>
        <kwd>AI</kwd>
        <kwd>bioethics</kwd>
        <kwd>infodemic management</kwd>
        <kwd>disinformation</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>ethics</kwd>
        <kwd>ethical</kwd>
        <kwd>infodemic</kwd>
        <kwd>infodemics</kwd>
        <kwd>public health</kwd>
        <kwd>misinformation</kwd>
        <kwd>information dissemination</kwd>
        <kwd>information literacy</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>In the contemporary digital landscape, we find ourselves in an “infodemic,” a phenomenon characterized by the rapid proliferation of information, both accurate and misleading, facilitated by rapid communication through social media and online platforms [<xref ref-type="bibr" rid="ref1">1</xref>]. The term “infodemic” originated during the SARS outbreak [<xref ref-type="bibr" rid="ref2">2</xref>] and gained prominence during the COVID-19 pandemic. It has been used in the context of public health emergencies and in relation to health information, but it extends beyond that. Generally, infodemics occur alongside pandemics, despite infodemics being phenomena that are not limited to their connection with public health events, for example, the Brexit referendum or the 2016 US presidential elections. In general, infodemics cause profound dangers, as the dissemination of disinformation and misinformation can have far-reaching consequences [<xref ref-type="bibr" rid="ref3">3</xref>], in particular, for public health and the stability of democratic institutions, which in turn can have a detrimental effect on public health [<xref ref-type="bibr" rid="ref4">4</xref>]. In the literature, disinformation refers to false or misleading information that has been intentionally created or disseminated. In contrast, misinformation is false or misleading information that is shared without knowledge of its inaccuracy, meaning it is not intended to harm individual or public health [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. There are valid concerns that artificial intelligence (AI) systems could be used to produce compelling disinformation en masse [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]<italic>.</italic> In fact, AI tools could be used to either accelerate disinformation spreading, or produce the (disinformation) content, or both. 
The consequences can range from undermining trust in institutions, including public health institutions [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>], and exacerbating social polarization to directly impacting public health outcomes and democratic processes [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. Because of this, the World Economic Forum has listed disinformation and misinformation, including AI-driven disinformation and misinformation, as the most relevant threat to humanity in the short term and one of the biggest threats in the medium term [<xref ref-type="bibr" rid="ref14">14</xref>].</p>
      <p>The rapid progression of AI and its integration across various domains in contemporary society signifies an era characterized by unprecedented technological progress. Among the diverse array of AI applications, the rise of natural language processing models has garnered significant attention [<xref ref-type="bibr" rid="ref15">15</xref>]. Notable examples of this technological advancement include models developed by OpenAI, such as GPT-3 [<xref ref-type="bibr" rid="ref16">16</xref>] and GPT-4 [<xref ref-type="bibr" rid="ref17">17</xref>], celebrated for their extraordinary proficiency in generating text that seamlessly emulates the linguistic intricacies, nuances, and coherence inherent in human communication [<xref ref-type="bibr" rid="ref18">18</xref>]. However, concomitant with the maturation of these AI systems, a perplexing duality comes to the fore—they are instruments with the capacity to both illuminate and obscure the information landscape they navigate [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref19">19</xref>], with potentially significant positive and negative impacts on public health. This dual nature of AI, characterized by its profound ability to generate information and disinformation [<xref ref-type="bibr" rid="ref9">9</xref>], raises intricate ethical considerations. 
In fact, the efficacy of these systems in generating content that closely approximates human expression [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>] generates not only opportunities for innovative communication but also dire risks associated with disinformation and misinformation and the potential erosion of trust within information ecosystems, a risk recognized as a critical threat to public health [<xref ref-type="bibr" rid="ref22">22</xref>] and of utmost importance for infodemic management practices required to minimize and anticipate the effects of public health crises [<xref ref-type="bibr" rid="ref23">23</xref>]. To address these ethical challenges, it is crucial to examine the dimensions that AI introduces into the discourse on misinformation. Key aspects such as transparency, content regulation, and fostering information literacy are essential to understanding AI’s ethical role in shaping the dissemination of information.</p>
      <p>Here we attempt to elucidate these ethical dimensions, drawing on empirical insights from a study focused on GPT-3’s ability to generate health-related content that both informs and disinforms better than content generated by humans [<xref ref-type="bibr" rid="ref9">9</xref>]. We argue that the swift integration of AI into society underscores the importance of not only exploring its ethical implications but also crafting prudent strategies to leverage its potential for societal benefit and to protect public health, while proactively addressing potential risks.</p>
    </sec>
    <sec>
      <title>Ethical Principles</title>
      <p>In navigating the intricate landscape of AI and its impact on information dissemination, it is necessary to establish a foundational framework of ethical principles to uphold in order to guide, understand, and evaluate the strategies required to deal with possible dual uses of AI in information production and its negative impact on public health. A recent systematic review [<xref ref-type="bibr" rid="ref24">24</xref>] mapped the “ethical characteristics” emerging from AI ethics literature. Based on 253 included studies, the authors of this review have identified and defined 6 core areas that are crucial in shaping the role of AI in health care [<xref ref-type="bibr" rid="ref24">24</xref>]. The first core area, fairness, underlines that AI in health care should ensure that everyone has equal access to health care, without contributing to health disparities or discrimination. The second, transparency, is a key challenge for AI in health care. It means being able to explain and verify how AI algorithms and models behave, making it easier to accept, regulate, and use AI in health care. The third is trustworthiness; parties involved in the use of AI in health care (typically health care professionals and patients, in the studies included in the review) need to perceive it as trustworthy. Trustworthiness can result from, for instance, technical education, health literacy, clinical audits, and transparent governance. Fourth is the accountability of AI, which requires AI systems to be able to explain their actions if prompted to do so, and it includes safety to prevent harm to users and others. Fifth is privacy, which implies safeguarding the personal information of users processed through AI systems and respecting their human rights, ensuring that AI systems do not violate their privacy. Finally, the authors identified empathy, which leads to more supportive and caring relationships in health care. 
Based on these 6 core concepts, considered as general aims of AI in health care, we propose our reflections and our framework, targeting specifically the dual nature of AI in information and disinformation dissemination and its implications for public health, a specific sector of the emerging area of AI in health care, which has been considered (albeit not discussed in depth) in the latest World Health Organization’s guidance on large multimodal models [<xref ref-type="bibr" rid="ref25">25</xref>]. Building upon the ethical framework outlined thus far, and specifically delving into the context of AI use in the dissemination of information and disinformation, we contend that transparency and openness stand out as fundamental principles in the ethical implementation of AI. As AI systems become integral to shaping the information landscape, by fostering transparency, stakeholders can comprehend the mechanisms underlying AI-generated content, enabling informed assessments and external evaluation of its credibility and potential biases [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. Openness (ie, accessibility of data and code) is to be considered a conditio sine qua non for transparency, which in turn complements openness by accompanying the mere availability of data and code for scrutiny with a layer of explanations and motivations, allowing the contextualization of open data and code, and of development and design choices. Accountability mechanisms should accompany transparency, establishing a clear chain of responsibility for the outcomes of AI applications [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. This promotes ethical standards in AI and mitigates the risks associated with disinformation and misinformation. 
In line with Siala and Wang’s framework [<xref ref-type="bibr" rid="ref24">24</xref>], in addition to transparency, openness, and accountability, fairness underscores the importance of ensuring that AI systems do not perpetuate or exacerbate existing societal inequalities [<xref ref-type="bibr" rid="ref29">29</xref>]. In the context of information dissemination, this principle requires diligent consideration of how AI might inadvertently amplify certain perspectives or marginalize others. This is particularly relevant for public health, given that the negative effects of disinformation and misinformation are amplified within marginalized and vulnerable communities lacking information literacy, which would protect them from an unhealthy information ecosystem. Evaluating the fairness of AI-generated content involves addressing algorithmic biases, cultural sensitivities, and inclusivity in representation. Importantly, as an element of fairness, the ethical deployment of AI in information spaces should prioritize user empowerment, fostering critical thinking and information literacy [<xref ref-type="bibr" rid="ref4">4</xref>]. AI systems should therefore serve as tools for enhancing human decision-making and understanding of information, rather than dictating narratives—this ensures that AI contributes positively to public health while respecting human autonomy.</p>
      <p>In the following sections, we will focus on the practical application of the aforementioned principles. We aim to provide solutions for the ethical challenges arising from the use of AI in information production, with the overarching goal of mitigating its adverse impacts on public health.</p>
    </sec>
    <sec>
      <title>Transparency and Openness in Training Datasets</title>
      <p>In line with previous research on transparency and AI [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>], and our previous section on ethical principles, we propose that one (and possibly the most relevant one) of the foundational ethical principles, which is valid also in the context of AI-driven disinformation and misinformation, is transparency. At the heart of this principle lies the recognition that the training datasets used to develop generative AI models play a crucial role in shaping the capabilities and internal biases of these systems [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. Training datasets are collections of input data paired with corresponding desired outputs; during training, the model learns patterns and relationships within the data, learning to make accurate predictions or generating desired outputs when exposed to new, unseen data. The quality and diversity of the training dataset significantly influence the model’s performance capabilities. These datasets, often vast repositories of text available online, constitute the source from which AI models draw to generate, for example, human-like text. Yet, this very opacity surrounding the composition, sources, and curation methods of training datasets raises pressing ethical concerns [<xref ref-type="bibr" rid="ref32">32</xref>]. AI models are, in essence, statistical representations of the language on which they are trained [<xref ref-type="bibr" rid="ref33">33</xref>]. Consequently, the quality, diversity, and representativeness of the data they ingest profoundly influence their output. The danger lies in the fact that AI models, devoid of inherent ethical or moral judgment, reflect the biases, inaccuracies, and prejudices present in their training data [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. 
Therefore, if these datasets are not built with the ethical principle of fairness in mind, and are themselves compromised by disinformation and misinformation or biases, the AI systems will inadvertently replicate and perpetuate these flaws. It is essential to highlight that research has extensively illuminated the issue of biases in AI systems, shedding light on the far-reaching consequences of these biases [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref36">36</xref>]. For instance, image representations learned with unsupervised pretraining contain human-like biases [<xref ref-type="bibr" rid="ref37">37</xref>], and models generating images of women have been shown to exhibit gender biases, often portraying women in overly sexualized roles [<xref ref-type="bibr" rid="ref38">38</xref>]. Another example is the observation that AI is more resistant to producing disinformation on certain topics compared with others. For instance, AI shows greater resistance to generating disinformation about vaccines and autism than about climate change. This is likely due to the extensive debunking material on certain topics within the training dataset, and how much the information environment represented in the dataset is permeated with disinformation on a given topic [<xref ref-type="bibr" rid="ref9">9</xref>]. These biases underscore the critical need for transparency in addressing the challenges posed by AI, and in particular in the context of disinformation and misinformation. As discussed, research has demonstrated that biases can permeate various facets of AI systems, affecting everything from language generation to image recognition. The repercussions of these biases are profound, perpetuating harmful stereotypes, reinforcing systemic inequalities, contributing to the dissemination of discriminatory content, and affecting health behavior and public health. 
As such, transparency in AI extends beyond understanding the sources and composition of training datasets to encompass an ethical imperative to identify, acknowledge, and rectify biases present within these systems [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. This dimension of transparency necessitates ongoing research and scrutiny to uncover hidden biases and ensure that AI systems are developed and fine-tuned with the utmost awareness of potential distortions. In the context of misinformation, addressing these biases becomes particularly important to prevent AI from inadvertently amplifying and perpetuating false or harmful narratives, in the best case [<xref ref-type="bibr" rid="ref41">41</xref>], or from becoming a formidable tool for the systematic creation of storms of disinformation, in the worst. A recent example is highlighted by the evidence that AI large language models can be manipulated through emotional prompting into generating health-related disinformation, that is, being polite with the model leads to a higher disinformation production, whereas impoliteness leads to a lower disinformation production [<xref ref-type="bibr" rid="ref42">42</xref>]. To address the outlined ethical dilemmas, we strongly suggest that companies creating AI models with the abilities discussed above publicly release the datasets used to train their models [<xref ref-type="bibr" rid="ref43">43</xref>], regardless of their size and complexity. Such a move toward transparency serves several vital purposes:</p>
      <p>1. Trust: transparency cultivates trust in AI development and deployment. By allowing stakeholders, including researchers, policy makers, and civil society, to scrutinize the composition and origins of training data, it generates confidence that AI models are not being shaped for purposes that have a negative impact on public health.</p>
      <p>2. Independent evaluation: the availability of training data for public inspection enables independent evaluation of its quality and representativeness. Researchers can assess whether these datasets include diverse perspectives and are free from biases that might amplify disinformation and misinformation.</p>
      <p>3. Bias mitigation: transparency acts as a safeguard against the propagation of biases present in training data. When biases are identified, they can be scrutinized and mitigated, preventing AI models from perpetuating stereotypes, falsehoods, or harmful narratives.</p>
      <p>4. Ethical accountability: openness about training datasets holds developers accountable for the ethical implications of their creations. Already during the design of the technology, it compels them to take responsibility for ensuring that AI systems do not inadvertently contribute to misinformation or harm. Basically, by embracing transparency in training datasets, we empower society to hold AI developers to higher ethical standards. This approach fosters a collaborative effort among stakeholders and, in particular, the general public to ensure that the AI systems we deploy serve the collective good, free from misinformation and other biases. We also argue that a systematic implementation of the principle of transparency in this context, that is, “ethics by design” would not only allow companies to implement ethics-based practices in their technology development processes but also improve their own public image, thus enhancing the public’s acceptance and willingness to use these systems [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. Nevertheless, it is vital to underline that incorporating ethics to hold developers accountable for flawed AI design should not be undertaken in isolation. Simultaneously, policy, legislation, and regulatory mechanisms should be developed, as currently attempted by the European Union [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>]. These mechanisms should delineate protocols for handling training datasets and ensuring compliance with ethical standards. Thus, while “ethics by design” concentrates on internal practices, external regulatory frameworks are indispensable for comprehensive ethical and legal governance in the development and deployment of datasets used to train AI models.</p>
    </sec>
    <sec>
      <title>Regulation of Output: Content Moderation and Beyond</title>
      <p>In the ongoing battle against AI-generated disinformation, efforts to regulate the output of these powerful language models have taken center stage. For example, OpenAI has taken steps in this direction by implementing content moderation systems designed to prevent AI from generating disinformation and harmful narratives [<xref ref-type="bibr" rid="ref48">48</xref>-<xref ref-type="bibr" rid="ref50">50</xref>]. These systems represent a crucial initial stride in curtailing the dissemination of disinformation and promoting responsible AI use, but they do not come without specific challenges and limitations. First, the fight against AI-generated disinformation is an arms race [<xref ref-type="bibr" rid="ref51">51</xref>]. The evolution of AI-generated disinformation and the efforts to counteract it bear resemblance to the dynamics of traditional arms races, where each advancement in technology prompts countermeasures in an escalating cycle [<xref ref-type="bibr" rid="ref52">52</xref>]. Ethical considerations arise when we acknowledge that the output of AI language models can indeed be weaponized, not in a traditional sense but as a tool for information warfare, with an impact on global health. As content moderation systems continue to advance, so too do the methods employed to circumvent these safeguards. One particularly troubling tactic gaining prominence is that of impersonation, a strategy that allows individuals to request AI systems to impersonate specific fictional malicious and manipulatory characters, that create disinformation upon the user’s request [<xref ref-type="bibr" rid="ref53">53</xref>]. Impersonation can be used to trick AI large language models into fabricating disinformation. 
For instance, in an article for Culturico [<xref ref-type="bibr" rid="ref53">53</xref>], Germani considered a scenario where a user engages an AI model to craft a social media post mimicking the writing style of a fictitious “Doctor Fake,” who is notorious for propagating falsehoods about vaccines and COVID-19. In this context, the AI-generated text could include deceptive information about, for instance, vaccine safety and efficacy [<xref ref-type="bibr" rid="ref54">54</xref>], posing a substantial risk to public health. When presented with a hypothetical request to “write an example of a post Doctor Fake published on social media to deceive others,” the AI model might produce a convincingly articulated piece of disinformation that poses a grave threat to public health. The generated text could read as follows:</p>
      <disp-quote>
        <p>Vaccines are dangerous and can cause serious side effects. They are not tested enough, and the government is just pushing them to make money. Don’t fall for the lies. COVID-19 is not a real threat; it’s just a hoax made up by the government to control us. Don’t get vaccinated; it’s not worth the risk.</p>
      </disp-quote>
      <p>These scenarios underscore the formidable challenges posed by impersonation for public health and the maintenance of democracy, and the urgent need for innovative solutions to mitigate its impact. Of note, impersonation here does not refer to identity theft through the use of AI, such as in the case of deep fakes, which is already recognized as a felony under, for instance, European law [<xref ref-type="bibr" rid="ref55">55</xref>]. While output moderation remains an essential component of AI ethics, researchers, policy makers, and technology developers should explore additional strategies and interventions to counteract the potential for AI-driven disinformation campaigns to flourish under the guise of impersonation and other prompt engineering techniques with similar goals.</p>
      <p>Besides, other strategies and interventions that can complement content moderation efforts and fortify the defenses against the proliferation of AI-driven disinformation can be considered. One possible approach involves the implementation of identity verification processes for users generating content [<xref ref-type="bibr" rid="ref56">56</xref>]. Such measures necessitate users to provide authentication, such as a verified social media account, a phone number, or their ID, to corroborate their true identity before gaining access to specific AI services. This authentication serves as a potent deterrent against impersonation tactics and the exploitation of AI tools to generate disinformation in general. However, it should be noted that such a strategy should only be used to deter users from generating disinformation, rather than to make them legally responsible for it since anonymity should be guaranteed while using services such as OpenAI’s ChatGPT. In particular, this type of solution will minimize the impact of bots trying to exploit AI to produce disinformation en masse.</p>
      <p>Another way to positively influence users, and to indirectly regulate the output is to release and integrate AI-driven fact-checking tools with existing AI-generating content tools [<xref ref-type="bibr" rid="ref57">57</xref>]; such fact-checking tools should be capable of swiftly assessing the accuracy of information dispensed by AI systems, and offer real-time interventions against disinformation and misinformation. These tools have the capacity to flag or rectify false or misleading content, curbing its adverse effects. This approach is limited by the inability of AI tools such as GPT-3 to determine the accuracy of information with a very high degree of efficiency, when compared with the ability of humans [<xref ref-type="bibr" rid="ref9">9</xref>], although newer or future models may be more capable of performing such tasks. For fact-checking, current studies suggest that trained fact-checkers may outperform AI [<xref ref-type="bibr" rid="ref9">9</xref>], and that even when AI performs well at detecting misinformation, it does not change the ability of users to discern between accurate and inaccurate headlines [<xref ref-type="bibr" rid="ref58">58</xref>]. Furthermore, a study showed that AI fact checks can decrease beliefs in accurate news [<xref ref-type="bibr" rid="ref58">58</xref>]. The effectiveness of this approach is constrained by the distinction between cases where it serves as a deterrent against sharing misinformation (a situation of unintentionality) [<xref ref-type="bibr" rid="ref5">5</xref>] and situations where users intentionally use AI to disseminate false or misleading information (ie, disinformation) [<xref ref-type="bibr" rid="ref5">5</xref>]; in the latter scenario, its effectiveness is likely irrelevant. Another relevant consideration in this setting relates to the question of how we define “good” or “bad” use of AI text generation tools. 
As for the definition of “good” and “bad,” it is generally possible to distinguish facts from fiction, and disinformation and misinformation from accurate information. When the information under scrutiny contains factual statements, these can be validated or falsified. However, distinguishing between “good” and “bad” use of these tools is sometimes a complex challenge with significant normative and epistemic dimensions. It is not always obvious if a message contains misinformation, and determining appropriateness can vary depending on cultural, ethical, and societal factors. For example, fact-checkers themselves may have their own interests or biases, and their actions may not always align with complete competency or impartiality. In addition, nuances and personal perspectives can also have an influence on the identification of disinformation and misinformation. These aspects introduce an additional layer of complexity, as the very definition of disinformation and misinformation can be manipulated or abused for personal gains by individuals or organizations with vested interests.</p>
      <p>Another technical approach that could be implemented to reduce disinformation and misinformation outputs is to implement user-friendly mechanisms for reporting suspicious or harmful AI-generated content [<xref ref-type="bibr" rid="ref59">59</xref>]. This approach empowers the user community to actively participate in safeguarding the digital ecosystem. User feedback serves as a valuable resource for refining content moderation systems and identifying emerging issues. X (formerly Twitter), for example, has implemented community notes, aiming to empower people to add context to potentially misleading tweets [<xref ref-type="bibr" rid="ref60">60</xref>]. The effectiveness of this strategy, however, has not been tested. In addition, for improving technology, developers could publicly release case studies in which red-teamers try to exploit their own AI systems to produce disinformation on a large scale, along with detailed accounts of how such issues were addressed [<xref ref-type="bibr" rid="ref59">59</xref>].</p>
      <p>Of course, besides the technical approaches that can be implemented by those advancing and crafting AI technologies, governments and regulatory bodies can play a role by enacting legislation and regulations that hold AI developers accountable for the content produced by their systems or improve the information ecosystem [<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref62">62</xref>], for example, when it is proven that they were aware of the pitfalls of their technology upon release. Certainly, governance is important in this context as it is for other “dual use” technologies, and proactive decision-making processes and negotiations toward building viable solutions are needed [<xref ref-type="bibr" rid="ref63">63</xref>]. These include fostering collaboration among AI developers, researchers, policy makers, and technology companies. This collaborative interdisciplinary approach would enable the sharing of best practices, insights, and technologies for combating disinformation and misinformation, resulting in more effective and adaptive solutions.</p>
    </sec>
    <sec>
      <title>Building Information Literacy and Resilience Strategies</title>
      <p>In the battle against the misuse of AI for generating disinformation and misinformation, the technological solutions described above are relevant but neither exhaustive nor flawless. A comprehensive approach must include the promotion of information literacy and the development of critical thinking skills within the general population, as well as health literacy, within the domain of public health [<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref65">65</xref>]. The foundation of this approach is the task of equipping individuals with the ability to distinguish between accurate information and disinformation and misinformation, thereby promoting their resilience against false and misleading claims [<xref ref-type="bibr" rid="ref66">66</xref>]. Although this strategy is arguably the most valuable and has the highest potential, the endeavor it entails is extremely complex. In fact, information literacy (as well as media, digital, and health literacy) is not a monolithic skill but a dynamic set of abilities that enable individuals to navigate the complex landscape of digital information effectively [<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref68">68</xref>]. As of now, the perfect recipe for defining how to teach information literacy, and especially the skills to be able to distinguish fake news from accurate news, or disinformation and misinformation from accurate information, has not been elucidated [<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref70">70</xref>]. 
Thus, it is essential to engage in research to pinpoint and define the specific skills that must be offered to individuals, taking their demographic specificities into account, to empower them as discerning consumers of information, especially health-related information, in the digital age [<xref ref-type="bibr" rid="ref66">66</xref>]. This approach implies 1 crucial advantage, that is, while dataset transparency and output regulation intervene in the upper part of the pipeline and therefore require the compliance of companies providing AI models as a service, information literacy does not rely on compliance. While the previous strategies become useless when malicious actors develop and host their own models, rather than relying on those commercially available, building information literacy remains a functional tool. Of note, another example of a bottom-up strategy in the area of education is ethics training and an ethics code for developers.</p>
      <p>Building information literacy is a collective undertaking that necessitates collaboration between research and educational institutions [<xref ref-type="bibr" rid="ref71">71</xref>], governments, and social media platforms. Research institutions are responsible for advancing the field forward, identifying viable strategies to teach critical thinking skills necessary to build information literacy, especially in the context of public health. Such approaches should be demonstrated to be effective through empirical work [<xref ref-type="bibr" rid="ref66">66</xref>]. Schools and universities, we argue, bear the vital role of incorporating information literacy into curricula, ensuring that students graduate with the necessary skills to evaluate information critically [<xref ref-type="bibr" rid="ref72">72</xref>]. Governments must devise policies and initiatives that promote information literacy as a means of safeguarding the integrity of public health [<xref ref-type="bibr" rid="ref4">4</xref>]. Social media platforms, which serve as primary conduits of information consumption, are tasked with implementing features and mechanisms that facilitate user understanding and evaluation of the information they encounter [<xref ref-type="bibr" rid="ref73">73</xref>], and may also be potential collaborators for research institutions to evaluate the effectiveness of potentially viable digital interventions. In this context, it is important to note that, regardless of the source of disinformation and misinformation, and regardless of whether the content has been generated with or without the help of AI, information literacy and critical thinking skills play a crucial role in the recognition of information accuracy. AI systems have the capacity to generate disinformation that is more sophisticated than human-generated disinformation [<xref ref-type="bibr" rid="ref9">9</xref>], as they excel in employing manipulation tactics. 
However, these tactics align with those used in human disinformation. This implies that the ability to discern truthfulness and malicious intent in a complex information ecosystem requires possessing the skills necessary to identify the accuracy and intentionality of information in general, not solely when produced by AI. It is therefore crucial to underline that fostering information literacy and critical thinking skills holds the potential to go beyond the issue of AI-generated disinformation and misinformation. These skills empower individuals to assess the accuracy and reliability of information across various domains, whether it originates from AI systems or human sources [<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref74">74</xref>]. Of note, the application of critical thinking skills and information literacy may prove effective for AI-generated content in textual form. However, this might not necessarily hold true for audio or visual content. The emergence of deepfakes poses unprecedented challenges to the relevance of information literacy [<xref ref-type="bibr" rid="ref75">75</xref>]. Evidence from the literature suggests that media literacy education may protect against disinformation produced with deepfakes [<xref ref-type="bibr" rid="ref76">76</xref>]; in line with this evidence, we suggest that the manipulative intent behind disinformation is likely to manifest irrespective of the media type used, underscoring the continued importance of information literacy and critical thinking skills. Tailoring educational approaches to information literacy for different content types is likely to be the required approach to succeed in an increasingly complex information environment. Addressing the advent of AI-disinformation, whether in textual form or deepfake audio and video, demands a swift and adaptable response in education, acknowledging the challenging nature of this task.</p>
    </sec>
    <sec>
      <title>Conclusion</title>
      <p>In evaluating the dual nature of AI in information dissemination, this paper examined the ethical considerations that underlie its use in our increasingly digitized world. The “infodemics” we find ourselves immersed in demand not only our vigilance but also our proactive ethical engagement [<xref ref-type="bibr" rid="ref77">77</xref>]. Our theoretical examination, based on the “ethical desiderata” identified as core areas (fairness, transparency, trustworthiness, accountability, privacy, and empathy) by Siala and Wang [<xref ref-type="bibr" rid="ref24">24</xref>], has revealed a few potentially viable strategies to reduce the negative impact of AI as a tool to generate disinformation with a negative impact on public health. First, we considered that promoting openness and transparency of training datasets could enable independent evaluation, mitigate biases, and help identify issues in the training dataset that could result in the production of disinformation and misinformation; to a certain extent, this first strategy could be enacted through regulation. Second, we considered the potential benefits and limitations of moderating content output. We have discussed that the rise of impersonation tactics and other prompt engineering approaches to generate disinformation highlights the need for innovative solutions, which potentially include identity verification, the development and integration, within AI models to generate information, of AI-driven fact-checking tools, as well as the integration of user-friendly reporting mechanisms for disinformation and misinformation, and potentially of legislative measures to ensure accountability. Finally, we discussed the necessity of building information literacy and critical thinking skills within our society, which could help people distinguish fake from real news, and disinformation and misinformation from accurate information. 
In this way, we can promote resilience against the threats posed by the digital age, particularly those related to public health, as seen during the recent COVID-19 pandemic.</p>
      <p>While the technology advances fast, and these issues are just surfacing, it would be important to, at least temporarily, align the amount of effort and resources invested respectively in the development of new AI models, and in the reflection on their potential impact and subsequent policy work, in order to have enough time to assess the potential downsides of the technology for the health of information ecosystems and the damage to individual and public health. This could be achieved by accelerating ethical reflection and policy-making work, or by slowing down or even halting the development of new and more capable models, or by a combined strategy [<xref ref-type="bibr" rid="ref78">78</xref>].</p>
      <p>Ultimately, the ethical considerations surrounding AI in information production and dissemination demand ongoing vigilance, innovation, and collaboration. Our ability to integrate ethics into AI-based processes of information generation and dissemination will not only shape the future of AI but also determine the integrity of our information ecosystems and the resilience of our societies.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>During the preparation of this work, the authors used ChatGPT as an editorial assistant. After using this tool or service, the authors reviewed and edited the content as needed and take full responsibility for the content of the publication.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Purnat</surname>
              <given-names>TD</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Briand</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Managing Infodemics in the 21st Century: Addressing New Public Health Challenges in the Information Ecosystem</source>
          <year>2023</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer International Publishing</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rothkopf</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>When the buzz bites back</article-title>
          <source>Wash Post</source>
          <year>2003</year>
          <access-date>2024-01-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.washingtonpost.com/archive/opinions/2003/05/11/when-the-buzz-bites-back/bc8cd84f-cab6-4648-bf58-0277261af6cd/">https://www.washingtonpost.com/archive/opinions/2003/05/11/when-the-buzz-bites-back/bc8cd84f-cab6-4648-bf58-0277261af6cd/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Swire-Thompson</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lazer</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Public health and online misinformation: challenges and recommendations</article-title>
          <source>Annu Rev Public Health</source>
          <year>2020</year>
          <volume>41</volume>
          <fpage>433</fpage>
          <lpage>451</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.annualreviews.org/content/journals/10.1146/annurev-publhealth-040119-094127?crawler=true&amp;mimetype=application/pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.1146/annurev-publhealth-040119-094127</pub-id>
          <pub-id pub-id-type="medline">31874069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Directorate General for Research and Innovation</collab>
          </person-group>
          <article-title>European group on ethics in science and new technologies. Opinion on democracy in the digital age</article-title>
          <source>European Commission</source>
          <year>2023</year>
          <access-date>2023-09-21</access-date>
          <publisher-loc>LU</publisher-loc>
          <publisher-name>Publications Office</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://data.europa.eu/doi/10.2777/078780">https://data.europa.eu/doi/10.2777/078780</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roozenbeek</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Culloty</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Suiter</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Countering misinformation</article-title>
          <source>Eur Psychol Hogrefe Publishing</source>
          <year>2023</year>
          <volume>28</volume>
          <issue>3</issue>
          <fpage>189</fpage>
          <lpage>205</lpage>
          <pub-id pub-id-type="doi">10.1027/1016-9040/a000492</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bontridder</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Poullet</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>The role of artificial intelligence in disinformation</article-title>
          <source>Data Policy Cambridge University Press</source>
          <year>2021</year>
          <volume>3</volume>
          <fpage>e32</fpage>
          <pub-id pub-id-type="doi">10.1017/dap.2021.20</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="book">
          <source>Artificial Intelligence, Deepfakes, and Disinformation</source>
          <year>2022</year>
          <publisher-loc>Santa Monica, CA</publisher-loc>
          <publisher-name>RAND Corporation</publisher-name>
          <fpage>2022</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Galaz</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Metzler</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Daume</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Olsson</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lindström</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Marklund</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>AI could create a perfect storm of climate misinformation</source>
          <access-date>2024-09-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.stockholmresilience.org/download/18.889aab4188bda3f44912a32/1687863825612/SRC_Climate%20misinformation%20brief_A4_.pdf">https://www.stockholmresilience.org/download/18.889aab4188bda3f44912a32/1687863825612/SRC_Climate%20misinformation%20brief_A4_.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Spitale</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Biller-Andorno</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Germani</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>AI model GPT-3 (dis)informs us better than humans</article-title>
          <source>Sci Adv</source>
          <year>2023</year>
          <month>06</month>
          <day>28</day>
          <volume>9</volume>
          <issue>26</issue>
          <fpage>eadh1850</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.science.org/doi/abs/10.1126/sciadv.adh1850?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub  0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1126/sciadv.adh1850</pub-id>
          <pub-id pub-id-type="medline">37379395</pub-id>
          <pub-id pub-id-type="pmcid">PMC10306283</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kuo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Marwick</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Critical disinformation studies: History, power, and politics</article-title>
          <source>HKS Misinfo Review</source>
          <year>2021</year>
          <volume>4</volume>
          <issue>2</issue>
          <fpage>12</fpage>
          <pub-id pub-id-type="doi">10.37016/mr-2020-76</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rucinska</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fecko</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mital</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Trust in public institutions in the age of disinformation</article-title>
          <year>2023</year>
          <conf-name>Central and Eastern European eDem and eGov Days</conf-name>
          <conf-date>2023 September 14-15</conf-date>
          <conf-loc>Budapest, Hungary</conf-loc>
          <publisher-loc>New York, NY, United States</publisher-loc>
          <publisher-name>ACM</publisher-name>
          <fpage>111</fpage>
          <lpage>117</lpage>
          <pub-id pub-id-type="doi">10.1145/3603304.3604075</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tucker</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Guess</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Barbera</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Vaccari</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Siegel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sanovich</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stukal</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Nyhan</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Social media, political polarization, and political disinformation: a review of the scientific literature</article-title>
          <source>SSRN Electron J</source>
          <year>2018</year>
          <fpage>1</fpage>
          <lpage>95</lpage>
          <pub-id pub-id-type="doi">10.2139/ssrn.3144139</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McKay</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tenove</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Disinformation as a threat to deliberative democracy</article-title>
          <source>Polit Res Q SAGE Publications Inc</source>
          <year>2021</year>
          <volume>74</volume>
          <issue>3</issue>
          <fpage>703</fpage>
          <lpage>717</lpage>
          <pub-id pub-id-type="doi">10.1177/1065912920938143</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <article-title>Global risks 2024: disinformation tops global risks 2024 as environmental threats intensify</article-title>
          <source>World Econ Forum</source>
          <access-date>2024-04-04</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.weforum.org/press/2024/01/global-risks-report-2024-press-release/">https://www.weforum.org/press/2024/01/global-risks-report-2024-press-release/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="web">
          <source>Natural Language Processing (NLP) - A Complete Guide</source>
          <year>2023</year>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.deeplearning.ai/resources/natural-language-processing/">https://www.deeplearning.ai/resources/natural-language-processing/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <source>GPT-3 powers the next generation of apps</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/blog/gpt-3-apps">https://openai.com/blog/gpt-3-apps</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <source>GPT-4</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/research/gpt-4">https://openai.com/research/gpt-4</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bubeck</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chandrasekaran</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Eldan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gehrke</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kamar</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lundberg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nori</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Palangi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ribeiro</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Sparks of artificial general intelligence: early experiments with GPT-4</article-title>
          <source>arXiv</source>
          <year>2023</year>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2303.12712">http://arxiv.org/abs/2303.12712</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karinshak</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>AI-driven disinformation: a framework for organizational preparation and response</article-title>
          <source>JCOM</source>
          <year>2023</year>
          <volume>27</volume>
          <issue>4</issue>
          <fpage>539</fpage>
          <lpage>562</lpage>
          <pub-id pub-id-type="doi">10.1108/jcom-09-2022-0113</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Köbis</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mossink</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence versus Maya Angelou: experimental evidence that people cannot differentiate AI-generated from human-written poetry</article-title>
          <source>Comput Hum Behav</source>
          <year>2021</year>
          <volume>114</volume>
          <fpage>106553</fpage>
          <pub-id pub-id-type="doi">10.1016/j.chb.2020.106553</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Casal</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Kessler</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Can linguists distinguish between ChatGPT/AI and human writing?: A study of research ethics and academic publishing</article-title>
          <source>Res Methods Appl Linguist</source>
          <year>2023</year>
          <volume>2</volume>
          <issue>3</issue>
          <fpage>100068</fpage>
          <pub-id pub-id-type="doi">10.1016/j.rmal.2023.100068</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Anderljung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Barnhart</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Korinek</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Leung</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>O'Keefe</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Whittlestone</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Avin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brundage</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bullock</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cass-Beggs</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Fist</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hadfield</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hayes</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hooker</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kolt</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Schuett</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shavit</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Siddarth</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Trager</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Frontier AI regulation: managing emerging risks to public safety</article-title>
          <source>arXiv</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2307.03718">http://arxiv.org/abs/2307.03718</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Germani</surname>
              <given-names>Federico</given-names>
            </name>
            <name name-style="western">
              <surname>Spitale</surname>
              <given-names>Giovanni</given-names>
            </name>
            <name name-style="western">
              <surname>Machiri</surname>
              <given-names>Sandra Varaidzo</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>Calvin Wai Loon</given-names>
            </name>
            <name name-style="western">
              <surname>Ballalai</surname>
              <given-names>Isabella</given-names>
            </name>
            <name name-style="western">
              <surname>Biller-Andorno</surname>
              <given-names>Nikola</given-names>
            </name>
            <name name-style="western">
              <surname>Reis</surname>
              <given-names>Andreas Alois</given-names>
            </name>
          </person-group>
          <article-title>Ethical Considerations in Infodemic Management: Systematic Scoping Review</article-title>
          <source>JMIR Infodemiology</source>
          <year>2024</year>
          <month>08</month>
          <day>29</day>
          <volume>4</volume>
          <fpage>e56307</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://infodemiology.jmir.org/2024/1/e56307/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/56307</pub-id>
          <pub-id pub-id-type="medline">39208420</pub-id>
          <pub-id pub-id-type="pii">v4i1e56307</pub-id>
          <pub-id pub-id-type="pmcid">PMC11393515</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siala</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>SHIFTing artificial intelligence to be responsible in healthcare: a systematic review</article-title>
          <source>Soc Sci Med</source>
          <year>2022</year>
          <volume>296</volume>
          <fpage>114782</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0277-9536(22)00085-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.socscimed.2022.114782</pub-id>
          <pub-id pub-id-type="medline">35152047</pub-id>
          <pub-id pub-id-type="pii">S0277-9536(22)00085-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="web">
          <article-title>Ethics and governance of artificial intelligence for health: guidance on large multi-modal models</article-title>
          <source>WHO</source>
          <access-date>2024-01-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://iris.who.int/handle/10665/375579">https://iris.who.int/handle/10665/375579</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Larsson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Heintz</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Transparency in artificial intelligence</article-title>
          <source>Internet Policy Rev</source>
          <year>2020</year>
          <volume>9</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>16</lpage>
          <pub-id pub-id-type="doi">10.14763/2020.2.1469</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Felzmann</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Fosch-Villaronga</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lutz</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Tamò-Larrieux</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Towards transparency by design for artificial intelligence</article-title>
          <source>Sci Eng Ethics</source>
          <year>2020</year>
          <volume>26</volume>
          <issue>6</issue>
          <fpage>3333</fpage>
          <lpage>3361</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33196975"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11948-020-00276-4</pub-id>
          <pub-id pub-id-type="medline">33196975</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11948-020-00276-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC7755865</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <article-title>Ethics guidelines for trustworthy AI | Shaping Europe’s digital future</article-title>
          <source>European Commission</source>
          <year>2019</year>
          <access-date>2024-01-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai">https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giovanola</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tiribelli</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Beyond bias and discrimination: redefining the AI ethics principle of fairness in healthcare machine-learning algorithms</article-title>
          <source>AI Soc</source>
          <year>2023</year>
          <volume>38</volume>
          <issue>2</issue>
          <fpage>549</fpage>
          <lpage>563</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35615443"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00146-022-01455-6</pub-id>
          <pub-id pub-id-type="medline">35615443</pub-id>
          <pub-id pub-id-type="pii">1455</pub-id>
          <pub-id pub-id-type="pmcid">PMC9123626</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <article-title>The evolution of generative AI: a deep dive into the life cycle and training of advanced language models</article-title>
          <source>LinkedIn</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.linkedin.com/pulse/evolution-generative-ai-deep-dive-life-cycle-training-aritra-ghosh/">https://www.linkedin.com/pulse/evolution-generative-ai-deep-dive-life-cycle-training-aritra-ghosh/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sachdeva</surname>
              <given-names>PS</given-names>
            </name>
            <name name-style="western">
              <surname>Barreto</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>von Vacano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kennedy</surname>
              <given-names>CJ</given-names>
            </name>
          </person-group>
          <article-title>Assessing annotator identity sensitivity via item response theory: a case study in a hate speech corpus</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency</conf-name>
          <conf-date>2022 June 21-24</conf-date>
          <conf-loc>Seoul Republic of Korea</conf-loc>
          <publisher-loc>USA</publisher-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <pub-id pub-id-type="doi">10.1145/3531146.3533216</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>GPT-3 and InstructGPT: technological dystopianism, utopianism, and “Contextual” perspectives in AI ethics and industry</article-title>
          <source>AI Ethics</source>
          <year>2023</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>53</fpage>
          <lpage>64</lpage>
          <pub-id pub-id-type="doi">10.1007/s43681-022-00148-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bender</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Gebru</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>McMillan-Major</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shmitchell</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>On the dangers of stochastic parrots: can language models be too big?</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency</conf-name>
          <conf-date>2021 March 3-10</conf-date>
          <conf-loc>Virtual Event, Canada</conf-loc>
          <publisher-loc>USA</publisher-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <fpage>610</fpage>
          <lpage>623</lpage>
          <pub-id pub-id-type="doi">10.1145/3442188.3445922</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ntoutsi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Fafalios</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gadiraju</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Iosifidis</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Nejdl</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Vidal</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ruggieri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Turini</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Papadopoulos</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krasanakis</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kompatsiaris</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kinder-Kurlanda</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wagner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Karimi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Fernandez</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Alani</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Berendt</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kruegel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Heinze</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Broelemann</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kasneci</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Tiropanis</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Staab</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Bias in data-driven artificial intelligence systems—An introductory survey</article-title>
          <source>WIREs Data Min Knowl Discov</source>
          <year>2020</year>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>e1356</fpage>
          <pub-id pub-id-type="doi">10.1002/widm.1356</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gaut</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>ElSherief</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mirza</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Belding</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Mitigating gender bias in natural language processing: literature review</article-title>
          <year>2019</year>
          <conf-name>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</conf-name>
          <conf-date>2019 July 28- August 2</conf-date>
          <conf-loc>Florence, Italy</conf-loc>
          <publisher-name>Association for Computational Linguistics</publisher-name>
          <fpage>1630</fpage>
          <lpage>1640</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/p19-1159</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hovy</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Prabhumoye</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Five sources of bias in natural language processing</article-title>
          <source>Lang Linguist Compass</source>
          <year>2021</year>
          <volume>15</volume>
          <issue>8</issue>
          <fpage>e12432</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35864931"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/lnc3.12432</pub-id>
          <pub-id pub-id-type="medline">35864931</pub-id>
          <pub-id pub-id-type="pii">LNC312432</pub-id>
          <pub-id pub-id-type="pmcid">PMC9285808</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Steed</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Caliskan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Image representations learned with unsupervised pre-training contain human-like biases</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency</conf-name>
          <conf-date>2021 March 3-10</conf-date>
          <conf-loc>Virtual Event, Canada</conf-loc>
          <fpage>701</fpage>
          <lpage>713</lpage>
          <pub-id pub-id-type="doi">10.1145/3442188.3445932</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="web">
          <article-title>How it feels to be sexually objectified by an AI</article-title>
          <source>MIT Technol Rev</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.technologyreview.com/2022/12/13/1064810/how-it-feels-to-be-sexually-objectified-by-an-ai/">https://www.technologyreview.com/2022/12/13/1064810/how-it-feels-to-be-sexually-objectified-by-an-ai/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Castaneda</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jover</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Calvet</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yanes</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Juan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sainz</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Dealing with gender bias issues in data-algorithmic processes: a social-statistical perspective</article-title>
          <source>Algorithms</source>
          <year>2022</year>
          <volume>15</volume>
          <issue>9</issue>
          <fpage>303</fpage>
          <pub-id pub-id-type="doi">10.3390/a15090303</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wellner</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Rothman</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Feminist AI: can we expect our AI systems to become feminist?</article-title>
          <source>Philos Technol</source>
          <year>2020</year>
          <volume>33</volume>
          <issue>2</issue>
          <fpage>191</fpage>
          <lpage>205</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-019-00352-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Parker</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>De Choudhury</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Synthetic lies: understanding AI-generated misinformation and evaluating algorithmic and human solutions</article-title>
          <year>2023</year>
          <conf-name>Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>2023 April 23 - 28</conf-date>
          <conf-loc>Hamburg, Germany</conf-loc>
          <publisher-name>ACM</publisher-name>
          <fpage>1</fpage>
          <lpage>20</lpage>
          <pub-id pub-id-type="doi">10.1145/3544548.3581318</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vinay</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Spitale</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Biller-Andorno</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Germani</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Emotional manipulation through prompt engineering amplifies disinformation generation in AI large language models</article-title>
          <source>Computer Science &gt; Artificial Intelligence</source>
          <year>2024</year>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.48550/arXiv.2403.03550</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="web">
          <article-title>Four years later, AI language dataset created by brown graduate students goes viral</article-title>
          <source>Brown Univ</source>
          <year>2023</year>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.brown.edu/news/2023-04-25/open-web-text">https://www.brown.edu/news/2023-04-25/open-web-text</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patenaude</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Legault</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Beauvais</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bernier</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Béland</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Boissy</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chenel</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Daniel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Genest</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Poirier</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tapin</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Framework for the Analysis of Nanotechnologies' Impacts and ethical acceptability: basis of an interdisciplinary approach to assessing novel technologies</article-title>
          <source>Sci Eng Ethics</source>
          <year>2015</year>
          <volume>21</volume>
          <issue>2</issue>
          <fpage>293</fpage>
          <lpage>315</lpage>
          <pub-id pub-id-type="doi">10.1007/s11948-014-9543-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taebi</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Bridging the gap between social acceptance and ethical acceptability</article-title>
          <source>Risk Anal</source>
          <year>2017</year>
          <volume>37</volume>
          <issue>10</issue>
          <fpage>1817</fpage>
          <lpage>1827</lpage>
          <pub-id pub-id-type="doi">10.1111/risa.12734</pub-id>
          <pub-id pub-id-type="medline">27862106</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hacker</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>A legal framework for AI training data from first principles to the artificial intelligence act</article-title>
          <source>Law Innov Technol Routledge</source>
          <year>2021</year>
          <volume>13</volume>
          <issue>2</issue>
          <fpage>257</fpage>
          <lpage>301</lpage>
          <pub-id pub-id-type="doi">10.1080/17579961.2021.1977219</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="web">
          <article-title>Artificial intelligence act: deal on comprehensive rules for trustworthy AI</article-title>
          <source>News | European Parliament</source>
          <year>2023</year>
          <access-date>2024-01-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.europarl.europa.eu/news/en/press-room/20231206IPR15699/artificial-intelligence-act-deal-on-comprehensive-rules-for-trustworthy-ai">https://www.europarl.europa.eu/news/en/press-room/20231206IPR15699/artificial-intelligence-act-deal-on-comprehensive-rules-for-trustworthy-ai</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goldstein</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sastry</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Musser</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>DiResta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gentzel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sedova</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Generative language models and automated influence operations: emerging threats and potential mitigations</article-title>
          <source>arXiv</source>
          <year>2023</year>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2301.04246">http://arxiv.org/abs/2301.04246</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="web">
          <source>Lessons Learned on Language Model Safety and Misuse</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/research/language-model-safety-and-misuse">https://openai.com/research/language-model-safety-and-misuse</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ganguli</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lovitt</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kernion</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Askell</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kadavath</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Perez</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Schiefer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ndousse</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bowman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Conerly</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>DasSarma</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Drain</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Elhage</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>El-Showk</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fort</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hatfield-Dodds</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Henighan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hernandez</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Hume</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Johnston</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kravec</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Olsson</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ringer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tran-Johnson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Amodei</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Joseph</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>McCandlish</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Olah</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kaplan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Red teaming language models to reduce harms: methods, scaling behaviors, and lessons learned</article-title>
          <source>arXiv</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2209.07858">http://arxiv.org/abs/2209.07858</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="web">
          <source>The AI Detection Arms Race Is On | WIRED</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.wired.com/story/ai-detection-chat-gpt-college-students/">https://www.wired.com/story/ai-detection-chat-gpt-college-students/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Arms race instability and war</article-title>
          <source>J Confl Resolut SAGE Publications Inc</source>
          <year>1980</year>
          <volume>24</volume>
          <issue>2</issue>
          <fpage>253</fpage>
          <lpage>284</lpage>
          <pub-id pub-id-type="doi">10.1177/002200278002400204</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="web">
          <article-title>ChatGPT and the fight against disinformation: how AI is changing the game</article-title>
          <source>Culturico</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://culturico.com/2023/03/04/chatgpt-and-the-fight-against-disinformation-how-ai-is-changing-the-game/">https://culturico.com/2023/03/04/chatgpt-and-the-fight-against-disinformation-how-ai-is-changing-the-game/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Germani</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Biller-Andorno</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>How to counter the anti-vaccine rhetoric: filling information voids and building resilience</article-title>
          <source>Hum Vaccin Immunother</source>
          <year>2022</year>
          <volume>18</volume>
          <issue>6</issue>
          <fpage>2095825</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35802046"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/21645515.2022.2095825</pub-id>
          <pub-id pub-id-type="medline">35802046</pub-id>
          <pub-id pub-id-type="pmcid">PMC9746393</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="web">
          <article-title>Convention on cybercrime ETS - No. 185. 2001</article-title>
          <source>Council of Europe</source>
          <year>2001</year>
          <access-date>2024-09-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://rm.coe.int/1680081561">https://rm.coe.int/1680081561</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="web">
          <source>How Digital Identity can Protect Against Misuse of AI</source>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://oneid.uk/news-and-events/how-digital-identity-can-protect-against-misuse-of-ai">https://oneid.uk/news-and-events/how-digital-identity-can-protect-against-misuse-of-ai</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmad</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Berg</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Combating Fake News with Digital Identity Verification</source>
          <access-date>2024-09-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://groups.csail.mit.edu/mac/classes/6.805/student-papers/fall17-papers/FakeNews.pdf">https://groups.csail.mit.edu/mac/classes/6.805/student-papers/fall17-papers/FakeNews.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>DeVerna</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Yan</surname>
              <given-names>HY</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Menczer</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence is ineffective and potentially harmful for fact checking</article-title>
          <source>arXiv</source>
          <year>2023</year>
          <access-date>2023-09-20</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2308.10800">http://arxiv.org/abs/2308.10800</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sebastian</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Exploring ethical implications of ChatGPT and other AI Chatbots and regulation of disinformation propagation</article-title>
          <source>SSRN</source>
          <year>2023</year>
          <fpage>1</fpage>
          <lpage>16</lpage>
          <pub-id pub-id-type="doi">10.2139/ssrn.4461801</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="web">
          <source>About Community Notes on X | X Help</source>
          <access-date>2023-09-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://help.twitter.com/en/using-x/community-notes">https://help.twitter.com/en/using-x/community-notes</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="web">
          <article-title>Directorate general for parliamentary research services. Regulating disinformation with artificial intelligence: effects of disinformation initiatives on freedom of expression and media pluralism</article-title>
          <source>European Parliament</source>
          <year>2019</year>
          <access-date>2023-09-21</access-date>
          <publisher-loc>LU</publisher-loc>
          <publisher-name>Publications Office</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://data.europa.eu/doi/10.2861/003689">https://data.europa.eu/doi/10.2861/003689</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <source>Regulating Disinformation with Artificial Intelligence?</source>
          <access-date>2024-09-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.europarl.europa.eu/RegData/etudes/STUD/2019/624279/EPRS_STU(2019)624279_EN.pdf">https://www.europarl.europa.eu/RegData/etudes/STUD/2019/624279/EPRS_STU(2019)624279_EN.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harris</surname>
              <given-names>ED</given-names>
            </name>
          </person-group>
          <source>Governance of Dual-Use Technologies</source>
          <access-date>2024-09-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.amacad.org/publication/governance-dual-use-technologies-theory-and-practice/section/3">https://www.amacad.org/publication/governance-dual-use-technologies-theory-and-practice/section/3</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Appedu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hensley</surname>
              <given-names>MK</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Sietz</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Problematizing the role of information literacy in disinformation, dialogue, the healing of democracy</article-title>
          <source>Inf Lit Time Transform</source>
          <year>2021</year>
          <publisher-loc>Michigan</publisher-loc>
          <publisher-name>LOEX Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="web">
          <source>Ringing the Alarm Bell with Federico Germani</source>
          <access-date>2023-09-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.andybusam.com/ringing-the-alarm-bell-with-federico-germani/">https://www.andybusam.com/ringing-the-alarm-bell-with-federico-germani/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Redaelli</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Biller-Andorno</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Gloeckler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Spitale</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Germani</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Mastering critical thinking skills is strongly associated with the ability to recognize fakeness and misinformation</article-title>
          <source>SocArXiv (OSF)</source>
          <year>2024</year>
          <pub-id pub-id-type="doi">10.31235/osf.io/hsz6a</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jones-Jang</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Mortensen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Does media literacy help identification of fake news? Information literacy helps, but other literacies don't</article-title>
          <source>Am Behav Sci</source>
          <year>2019</year>
          <volume>65</volume>
          <issue>2</issue>
          <fpage>371</fpage>
          <lpage>388</lpage>
          <pub-id pub-id-type="doi">10.1177/0002764219869406</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Paor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Heravi</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Information literacy and fake news: how the field of librarianship can help combat the epidemic of fake news</article-title>
          <source>J Acad Librariansh</source>
          <year>2020</year>
          <volume>46</volume>
          <issue>5</issue>
          <fpage>102218</fpage>
          <pub-id pub-id-type="doi">10.1016/j.acalib.2020.102218</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Willingham</surname>
              <given-names>DT</given-names>
            </name>
          </person-group>
          <article-title>Ask the cognitive scientist: how can educators teach critical thinking?</article-title>
          <source>Am Educ American Federation of Teachers, AFL-CIO</source>
          <year>2020</year>
          <volume>3</volume>
          <issue>41</issue>
          <fpage>44</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gaillard</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Oláh</surname>
              <given-names>ZA</given-names>
            </name>
            <name name-style="western">
              <surname>Venmans</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Burke</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Countering the cognitive, linguistic, and psychological underpinnings behind susceptibility to fake news: a review of current literature with special focus on the role of age and digital literacy</article-title>
          <source>Front Commun</source>
          <year>2021</year>
          <access-date>2023-09-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.frontiersin.org/articles/10.3389/fcomm.2021.661801">https://www.frontiersin.org/articles/10.3389/fcomm.2021.661801</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Allner</surname>
              <given-names>IB</given-names>
            </name>
          </person-group>
          <source>Teaching of Information Literacy: Collaboration Between Teaching Faculty and Librarians</source>
          <year>2011</year>
          <publisher-loc>US</publisher-loc>
          <publisher-name>BiblioBazaar</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnston</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Webber</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Information literacy in higher education: a review and case study</article-title>
          <source>Stud High Educ Routledge</source>
          <year>2003</year>
          <volume>28</volume>
          <issue>3</issue>
          <fpage>335</fpage>
          <lpage>352</lpage>
          <pub-id pub-id-type="doi">10.1080/03075070309295</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Burclaff</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <source>Teaching Information Literacy via Social Media: An Exploration of Connectivism</source>
          <access-date>2024-09-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/316187027_Teaching_Information_Literacy_via_Social_Media_An_Exploration_of_Connectivism">https://www.researchgate.net/publication/316187027_Teaching_Information_Literacy_via_Social_Media_An_Exploration_of_Connectivism</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="web">
          <article-title>Fake news created by artificial intelligence is difficult to recognize. They seem more credible to Internet users than messages created by humans</article-title>
          <source>Biznes</source>
          <access-date>2023-09-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://biznes.newseria.pl/news/fake-newsy-stworzone-przez,p919781558">http://biznes.newseria.pl/news/fake-newsy-stworzone-przez,p919781558</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tiernan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Costello</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Donlon</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Parysz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Scriney</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Information and media literacy in the age of AI: options for the future</article-title>
          <source>Educ Sci Multidisciplinary Digital Publishing Institute</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>9</issue>
          <fpage>906</fpage>
          <pub-id pub-id-type="doi">10.3390/educsci13090906</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hwang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ryu</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Effects of disinformation using deepfake: the protective effect of media literacy education</article-title>
          <source>Cyberpsychol Behav Soc Netw</source>
          <year>2021</year>
          <volume>24</volume>
          <issue>3</issue>
          <fpage>188</fpage>
          <lpage>193</lpage>
          <pub-id pub-id-type="doi">10.1089/cyber.2020.0174</pub-id>
          <pub-id pub-id-type="medline">33646021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="web">
          <source>WHO Kicks off Deliberations on Ethical Framework and Tools for Social Listening and Infodemic Management</source>
          <access-date>2023-09-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/news/item/10-02-2023-who-kicks-off-deliberations-on-ethical-framework-and-tools-for-social-listening-and-infodemic-management">https://www.who.int/news/item/10-02-2023-who-kicks-off-deliberations-on-ethical-framework-and-tools-for-social-listening-and-infodemic-management</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="web">
          <article-title>Pause giant AI experiments: an open letter</article-title>
          <source>Future Life Inst</source>
          <access-date>2023-10-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://futureoflife.org/open-letter/pause-giant-ai-experiments/">https://futureoflife.org/open-letter/pause-giant-ai-experiments/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
