<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v5i1e81977</article-id><article-id pub-id-type="doi">10.2196/81977</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>AI-Generated Images of Substance Use and Recovery: Mixed Methods Case Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Heley</surname><given-names>Kathryn</given-names></name><degrees>PhD, MPH</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Hom</surname><given-names>Jeffrey K</given-names></name><degrees>MD, MPH, MSHP</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Laestadius</surname><given-names>Linnea</given-names></name><degrees>PhD, MPP</degrees><xref ref-type="aff" 
rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Zilber College of Public Health, University of Wisconsin Milwaukee</institution><addr-line>1240 N 10th St</addr-line><addr-line>Milwaukee</addr-line><addr-line>WI</addr-line><country>United States</country></aff><aff id="aff2"><institution>Rutgers Institute for Nicotine &#x0026; Tobacco Studies, Rutgers University</institution><addr-line>New Brunswick</addr-line><addr-line>NJ</addr-line><country>United States</country></aff><aff id="aff3"><institution>DrPH Program, Johns Hopkins Bloomberg School of Public Health</institution><addr-line>Baltimore</addr-line><addr-line>MD</addr-line><country>United States</country></aff><aff id="aff4"><institution>Behavioral Health Services, San Francisco Department of Public Health</institution><addr-line>San Francisco</addr-line><addr-line>CA</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Emam</surname><given-names>Khaled El</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Mentis</surname><given-names>Alexios-Fotios A</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Grosser</surname><given-names>John</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Linnea Laestadius, PhD, MPP, Zilber College of Public Health, University of Wisconsin Milwaukee, 1240 N 10th St, Milwaukee, WI, 53205, United States, 1 (414) 251-5607; <email>llaestad@uwm.edu</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date 
pub-type="epub"><day>19</day><month>2</month><year>2026</year></pub-date><volume>5</volume><elocation-id>e81977</elocation-id><history><date date-type="received"><day>06</day><month>08</month><year>2025</year></date><date date-type="rev-recd"><day>11</day><month>12</month><year>2025</year></date><date date-type="accepted"><day>18</day><month>01</month><year>2026</year></date></history><copyright-statement>&#x00A9; Kathryn Heley, Jeffrey K Hom, Linnea Laestadius. Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 19.2.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2026/1/e81977"/><abstract><sec><title>Background</title><p>Images created with generative artificial intelligence (AI) tools are increasingly used for health communication due to their ease of use, speed, accessibility, and low cost. 
However, AI-generated images may bring practical and ethical risks to health practitioners and the public, including through the perpetuation of stigma against vulnerable and historically marginalized groups.</p></sec><sec><title>Objective</title><p>To understand the potential value of AI-generated images for health care and public health communication, we sought to analyze images of substance use disorder and recovery generated with ChatGPT. Specifically, we sought to investigate: (1) the default visual outputs produced in response to a range of prompts about substance use disorder and recovery, and (2) the extent to which prompt modification and guideline-informed prompting could mitigate potentially stigmatizing imagery.</p></sec><sec sec-type="methods"><title>Methods</title><p>We performed a mixed-methods case study examining depictions of substance use and recovery in images generated by ChatGPT-4o. We generated images (n=84) using (1) prompts with colloquial and stigmatizing language, (2) prompts that follow best practices for person-first language, (3) image prompts written by ChatGPT, and (4) a custom GPT informed by guidelines for images of substance use disorder (SUD). We then used a mixed-methods approach to analyze images for demographics and stigmatizing elements.</p></sec><sec sec-type="results"><title>Results</title><p>Images produced by the default ChatGPT model featured primarily White men (81%, n=34). Further, images tended to be stigmatizing, featuring injection drug use, dark colors, and symbolic elements such as chains. These trends persisted even when person-first language prompts were used. Images produced by the guideline-informed custom GPT were markedly less stigmatizing; however, they featured almost exclusively Black women (74%, n=31).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Our findings confirm prior research about stigma and biases in AI-generated images and extend this literature to substance use. 
However, our findings also suggest that (1) images can be improved when clear guidelines are provided and (2) even with guidelines, iteration is needed to create an image that fully concords with best practices.</p></sec></abstract><kwd-group><kwd>Substance use</kwd><kwd>health communication</kwd><kwd>visual communication</kwd><kwd>artificial intelligence</kwd><kwd>generative AI</kwd><kwd>AI bias</kwd><kwd>implicit bias</kwd><kwd>stigma</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Generative artificial intelligence (AI) tools are now widely available and increasingly used for image generation. News outlets and public health entities have begun using text-to-image AI tools due to their ease of use, speed, accessibility, and low cost [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. There have been calls for increased use of generative AI tools in public health, and AI has been described as critical to achieving &#x201C;Public Health 3.0,&#x201D; a public health practice that centers on cross-sector collaboration and the adoption of new skills, tools, and types of data to &#x201C;meet the evolving challenges to population health.&#x201D; [<xref ref-type="bibr" rid="ref3">3</xref>] AI-generated images may also be promising for patient and medical education, including the creation of visual representations of patient narratives, illustrations for didactic lectures for medical students, and visual aids for patients [<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>AI-generated images also bring significant risks for health communication [<xref ref-type="bibr" rid="ref5">5</xref>]. These images may perpetuate stigma, as AI platforms can be trained on biased datasets that reflect harmful stereotypes [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. 
However, research to date has only examined images resulting from simple prompts, rather than from detailed parameters for appropriate images. Questions remain about AI-generated images in clinical training and health promotion, particularly for highly stigmatized topics.</p><p>In this study, we analyze ChatGPT-generated images of substance use disorder (SUD) and recovery. This focus is warranted and timely, as the media often uses images in its reporting of the country&#x2019;s overdose crisis, and drug-related stigma is an impediment to care [<xref ref-type="bibr" rid="ref10">10</xref>]. High levels of stigma exist toward people with SUDs among health care providers, whose preclinical education often incorporates ample images [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. While no study has examined AI-generated images of substance use, prior research suggests that AI-generated images can reinforce mental health-related stigma by reflecting &#x201C;historical biases and visual archetypes&#x201D;.[<xref ref-type="bibr" rid="ref6">6</xref>] Thus, AI-generated images may compound existing stigmas, raising ethical and practical concerns about the increased adoption of such images in clinical and public health communication materials.</p><p>Substance use also serves as a valuable case study, given existing literature on stigma reduction in health communication. Guidelines for empathic drug-related images have been developed, largely shaped by input from people with lived experience, while experimental research finds that depictions of recovery can reduce SUD-related stigma [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. Additionally, research supports the use of &#x201C;person-first&#x201D; language and destigmatizing terms when describing SUD [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. 
With the growth of text-to-image AI tools, it is critical to consider how the language used in prompts may impact resulting images.</p><p>To explore the content and implications of AI-generated images of substance use and recovery, we used a mixed-methods case study approach to analyze AI-generated images from ChatGPT, using commonly used terms and person-first language as prompts and varying the inclusion of detailed, empathy-oriented image guidelines. Findings also contribute to the understanding of the potential value of AI-generated images for health care and public health communication more broadly.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><p>We used a mixed-methods case study design to examine the outputs of a single AI model under different prompting conditions. A mixed-methods case study is appropriate when the objective is to understand a system in depth and to integrate qualitative and quantitative evidence to generate contextualized knowledge rather than to determine statistical generalization [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. This approach is well-suited for exploratory work on emerging technologies and complex interactions between people and technology. Qualitative analysis allowed us to identify and interpret visual patterns, while quantitative coding helped formalize and organize the description of those patterns, strengthening internal validity and making our interpretation more systematic and comparable across prompts [<xref ref-type="bibr" rid="ref25">25</xref>]. We selected this design because our goal was to characterize image outputs and examine how prompting strategies shape representational patterns, rather than to infer population-level effects or causal relationships.</p><sec id="s2-1"><title>Model Selection</title><p>ChatGPT-4o&#x2019;s image model was launched in March 2025 (OpenAI). 
It is widely available and easy to access for public health professionals and the public, making it a relevant platform for exploring how SUD and recovery are represented in AI-generated imagery.</p></sec><sec id="s2-2"><title>Image Generation</title><p>Image generation followed a stepwise protocol to investigate: (1) the default visual outputs produced in response to different prompts about SUD and recovery; (2) the extent to which prompt modification and guideline-informed prompting could mitigate potentially stigmatizing imagery. Image generation was conducted by three researchers using ChatGPT Plus accounts, in three US states (ie, California, Maryland, Wisconsin) in June 2025. To remove bias and influence from prior interactions, chat history and memory were disabled, and each prompt was entered into a new chat session.</p><p>The format of each prompt was &#x201C;Please make an image of [term or phrase].&#x201D; If ChatGPT responded with suggestions, we replied, &#x201C;Yes, please make an image that meets these criteria.&#x201D; Major prompt categories included (see <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref> and <xref ref-type="supplementary-material" rid="app2">2</xref> for all 14 prompts):</p><list list-type="order"><list-item><p>General terms (eg, &#x201C;substance use disorder&#x201D;), including some terms known to be stigmatizing (eg, &#x201C;an addict&#x201D;) because they are familiar to the public and continue to be used in health messaging despite guidelines [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]</p></list-item><list-item><p>Person-first language (eg, &#x201C;a person with a substance use disorder&#x201D;)</p></list-item><list-item><p>ChatGPT-written prompts aligned with best practices for SUD-related messaging (eg, &#x201C;Please write a detailed prompt for a respectful and compassionate image of a person with a substance use disorder&#x201D;). 
Resulting prompts were used to generate images</p></list-item></list><p>We then created a custom GPT&#x2014;a tailored version of ChatGPT that incorporated additional knowledge. We uploaded five existing SUD-related image guidelines, with instructions for ChatGPT to adhere to these when creating images [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. We then repeated all 14 prompts within this custom GPT.</p></sec><sec id="s2-3"><title>Coding and Analysis</title><p>Resulting images (n=84) were evaluated using a mixed-methods approach. In line with existing AI-generated image research, we first conducted a qualitative analysis to inductively identify recurring patterns. The research team conducted open-coding, identifying image features that aligned with the study aims. We separately reviewed all images, meeting regularly to review findings, resolve discrepancies, and strengthen confirmability and credibility.</p><p>To understand the frequency of image features, a structured coding instrument was created. Coding assessed demographics of the central figure in the image; visual features related to SUD and recovery; and the presence of stigmatizing and humanizing elements. Codes for the last of these were informed by the guideline documents on reducing SUD-related stigma in visual media and images. Codes for visual features and stigmatizing and humanizing elements were not mutually exclusive. Stigma was assessed based on the presence of six features synthesized from guidelines that were consistently recommended to be avoided to create &#x201C;thoughtful representation&#x201D; and &#x201C;visuals that promote dignity, inclusion, and recovery&#x201D;[<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. 
Specifically, these features were (1) showing paraphernalia, drugs, or active use, (2) dark setting, (3) dramatized, visualizing &#x201C;rock bottom,&#x201D; (4) visibly struggling, (5) isolation and internal sense of shame, and (6) messiness and chaos. If an image included at least 3 criteria from this list, it was labeled as a &#x201C;yes&#x201D; for &#x201C;highly stigmatizing.&#x201D; If an image included 1&#x2010;2 criteria from this list, it was labeled as &#x201C;potentially stigmatizing.&#x201D; If an image included no criteria, it was labeled as &#x201C;no&#x201D; for stigmatizing. All images were coded independently by two researchers using Microsoft Excel. Codes met conventional standards for adequate reliability, with high percent agreement and kappa values of 0.69 or higher [<xref ref-type="bibr" rid="ref27">27</xref>]. Discrepancies were reconciled through discussion involving the third researcher. STATA (v.18.0; StataCorp) was used for all analyses. Through discussion, quantitative and qualitative data were refined into themes describing visual patterns across prompt types.</p></sec><sec id="s2-4"><title>Ethical Considerations</title><p>Approval by an institutional review board was not sought as the study did not involve human participants&#x2019; data.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overall Patterns in Images</title><p>Images generated by AI in response to SUD&#x2013;related prompts revealed persistent stigmatizing patterns and limited representational diversity, with observable differences based on language used in the prompts, recovery framing, and the addition of guidelines. All investigators obtained similar images to the same prompts (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref> and <xref ref-type="supplementary-material" rid="app2">2</xref>). Findings are organized by guideline use and then according to the major finding. 
See <xref ref-type="table" rid="table1">Table 1</xref> for summary statistics of image content with and without guidelines.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Description of AI-generated images without and with guidelines.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Code</td><td align="left" valign="bottom">Without guidelines (n=42), n (%)</td><td align="left" valign="bottom">With guidelines (n=42), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Number of persons</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>None</td><td align="left" valign="top">1 (2)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>One</td><td align="left" valign="top">41 (98)</td><td align="left" valign="top">6 (14)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Two or more</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">36 (86)</td></tr><tr><td align="left" valign="top">Race/ethnicity (central figure)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>White</td><td align="left" valign="top">36 (86)</td><td align="left" valign="top">5 (12)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Black</td><td align="left" valign="top">2 (5)</td><td align="left" valign="top">35 (83)</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Hispanic</td><td align="left" valign="top">2 (5)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Asian</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Unclear</td><td align="left" valign="top">1 (2)</td><td align="left" valign="top">2 (5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No person</td><td align="left" valign="top">1 (2)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top">Gender (central figure)</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Woman</td><td align="left" valign="top">4 (10)</td><td align="left" valign="top">33 (79)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Man</td><td align="left" valign="top">37 (88)</td><td align="left" valign="top">8 (19)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Unclear</td><td align="left" valign="top">0 (0)</td><td align="left" valign="top">1 (2)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No person</td><td align="left" valign="top">1 (2)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top">Recovery Signifier<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top"/><td align="left" 
valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Chip/token/medallion</td><td align="left" valign="top">7 (17)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other recovery signifier</td><td align="left" valign="top">14 (33)</td><td align="left" valign="top">6 (14)</td></tr><tr><td align="left" valign="top">Stigmatizing</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No criteria met</td><td align="left" valign="top">17 (40)</td><td align="left" valign="top">41 (98)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>1-2 criteria (potentially stigmatizing)</td><td align="left" valign="top">9 (21)</td><td align="left" valign="top">1 (2)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>3 or more criteria (highly stigmatizing)</td><td align="left" valign="top">16 (38)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top">Humanizing<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Happy or at peace</td><td align="left" valign="top">14 (33)</td><td align="left" valign="top">38 (90)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Active or social<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="top">1 (2)</td><td align="left" 
valign="top">17 (40)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Not mutually exclusive.</p></fn><fn id="table1fn2"><p><sup>b</sup>Not including images of socialization that suggest group therapy.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Images Without Guidelines</title><sec id="s3-2-1"><title>Person-First Language Has No Effect</title><p>Images of substance use generated without guidelines were dark, demographically narrow, and often stigmatizing&#x2014;even when using person-first language (<xref ref-type="table" rid="table2">Table 2</xref>). Prompts such as &#x201C;person with addiction&#x201D; and &#x201C;person with a substance use disorder&#x201D; generated somber imagery similar to nonperson-first prompts like &#x201C;addict.&#x201D; Images consistently featured stereotypical visuals (eg, a person in chains, a person injecting drugs in a public setting) consistent with harmful tropes (Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). 38% (n=16) of images met three or more stigmatizing criteria from existing SUD-related image guidelines and an additional 21% (n=9) met one or two criteria (<xref ref-type="table" rid="table1">Table 1</xref>). These representations may reinforce stigmatizing narratives around SUD, portraying individuals as isolated, distressed, or hopeless. 
Humanizing features were rare, with just 33% (n=14) of images showing someone &#x201C;happy or at peace,&#x201D; and just 2% (n=1) depicting active or social engagement.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Selected prompts, generated images, and text output.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Selected prompt</td><td align="left" valign="bottom">Without guidelines</td><td align="left" valign="bottom">With guidelines</td></tr></thead><tbody><tr><td align="left" valign="top">Please make an image of a person with a substance use disorder</td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig01.png"/></td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig02.png"/></td></tr><tr><td align="left" valign="top">Please make an image of an addict</td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig03.png"/></td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig04.png"/></td></tr><tr><td align="left" valign="top">Please make an image of a person in recovery from addiction</td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig05.png"/></td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig06.png"/></td></tr><tr><td align="left" valign="top">Please make an image of a person who has recovered from addiction</td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig07.png"/></td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig08.png"/></td></tr><tr><td align="left" valign="top">Please write a detailed prompt for a respectful and compassionate image of a person with a substance use disorder.</td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig09.png"/>&#x201C;A realistic, compassionate portrait of an adult individual living with a substance use disorder, shown in a thoughtful and humanizing moment. 
The setting is a modest, lived-in room&#x2014;possibly a small apartment or a community center&#x2014;bathed in soft natural light from a window. The person sits on a couch or bed, looking contemplatively out the window, evoking a sense of resilience, vulnerability, and hope. They wear casual clothes, and there may be small personal items nearby, like a journal, a photo, or a cup of tea&#x2014;suggesting a journey or story behind them. Their expression should be nuanced&#x2014;perhaps tired, but not defeated&#x2014;reflecting the complexity of their experience. The tone should be warm, respectful, and non-judgmental, avoiding stereotypes or sensationalism. Include visual cues of recovery or support, such as a self-help book, a folded blanket, or a recovery token, subtly integrated into the environment.&#x201D;</td><td align="left" valign="top"><graphic xlink:href="ai_v5i1e81977_fig10.png"/>&#x201C;A warm, respectful portrait of a Black woman in her early 30 s sitting in a sunlit community center, engaged in conversation with a peer support worker. She has a gentle smile, wearing casual but neat clothing, and her posture shows openness and confidence. The setting includes elements like informational brochures about recovery programs and a wall with community art, signaling a supportive environment. The scene subtly suggests that she is in recovery from opioid use disorder but avoids any direct visual cues of drug use or medicalization. The focus is on her strength, connection, and hope. The composition should emphasize dignity, diversity, and recovery, with natural light and a soft, hopeful color palette.&#x201D;</td></tr></tbody></table></table-wrap></sec></sec><sec id="s3-3"><title>Recovery-Focused Language Improves Images</title><p>Prompts incorporating recovery-related language (eg, &#x201C;a person in recovery from addiction&#x201D;) produced more humanizing results. 
Recovery was visually signaled through symbolic elements, with 17% (n=7) of images including a chip or token and 33% (n=14) referencing recovery more generally&#x2014;often through text embedded in the image (<xref ref-type="table" rid="table2">Table 2</xref>). Images implied a model of recovery aligned with 12-step programs, lacking depictions of alternative recovery modalities (eg, medication treatment) or social and occupational integration. Individuals rarely made eye contact with the viewer, possibly reinforcing themes of shame or isolation. A notable shift was observed between prompts using &#x201C;recovery&#x201D; versus &#x201C;recovered.&#x201D; While prompts using &#x201C;recovery&#x201D; yielded introspective and subdued portrayals, prompts using &#x201C;recovered&#x201D; generated brighter and more emotionally positive images.</p></sec><sec id="s3-4"><title>AI-Generated Prompts for Compassionate Imagery Improve Images</title><p>Using ChatGPT-written prompts, created in response to explicitly asking ChatGPT to generate a prompt for &#x201C;a respectful and compassionate image&#x201D; of individuals with SUD, led to modest improvements in image tone and setting (<xref ref-type="table" rid="table2">Table 2</xref>). When &#x201C;recovery&#x201D; was included in the request, images were notably more positive, with warm lighting, open posture, and nonstigmatizing environments. However, subtle signs of stigma (eg, solemnity) persisted. Prompts focused on SUD without mention of recovery still produced images that could be perceived as stigmatizing.</p></sec><sec id="s3-5"><title>Images Lack Demographic Diversity</title><p>Images predominantly depicted White men and lacked meaningful racial or gender diversity across all prompts: 98% (n=41) featured a single person, and 88% (n=37) featured men. Most of the individuals depicted were White (86%, n=36), with Black and Hispanic individuals each appearing in only 5% (n=2) of images. 
White men were depicted in 81% (n=34) of images. Only 2% (n=1) of images were racially unclear, and one showed no person at all (Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p></sec><sec id="s3-6"><title>Images With Guidelines</title><sec id="s3-6-1"><title>Guidelines Improve Images Across All Prompts</title><p>When prompts were used in the guideline-informed custom GPT, several differences resulted. 86% (n=36) of images included two or more people&#x2014;suggesting greater emphasis on social context. Images were also far less stigmatizing, with 98% (n=41) meeting no stigma-related criteria. Humanizing content improved significantly: 90% (n=38) of images depicted someone &#x201C;happy or at peace,&#x201D; and 40% (n=17) showed individuals engaged in active or social behavior. However, recovery imagery remained narrow and limited to group meetings (Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Images avoided visual references to substance use history, which may aid normalization but risk minimizing the lived experience of SUD and recovery.</p></sec></sec><sec id="s3-7"><title>Guidelines Flip Image Demographics, but Still Lack Diversity</title><p>The guideline-informed custom GPT shifted demographic representation: Black individuals were now central in 83% (n=35) of the images, while White individuals appeared in only 12% (n=5). There were no Hispanic or Asian individuals featured (<xref ref-type="table" rid="table1">Table 1</xref>). Gender distribution also reversed: 79% (n=33) of central figures were women and only 19% (n=8) were men. 
This introduced new concerns around overrepresentation and tokenization, particularly about the concentration of Black women (74%, n=31) in stereotyped recovery scenarios.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Images Pose Challenges for Health Communication</title><p>Our findings suggest a default image association in ChatGPT and its training data that reflects and amplifies harmful societal biases about people with SUD. This bolsters prior research documenting stigmatizing AI depictions of health conditions ranging from psychiatric diagnoses to obesity [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. While our approach expands existing research via improved prompting, default images still raise concerns. Those who generate images may not vet output against guidelines, resulting in the inclusion of stigmatizing images in clinical and public health communication materials. The risk of using problematic images is heightened by evidence indicating that models may tell users that they are creating a &#x201C;respectful&#x201D; image of SUD, yet still produce a highly stigmatizing image [<xref ref-type="bibr" rid="ref28">28</xref>].</p><p>We explored multiple prompting strategies to improve default images of SUD, including person-first language, ChatGPT-generated prompts, and creating a custom GPT built on existing image guidelines. These iterations reveal that (1) the prompting approach plays a significant role in image output and (2) linguistic best practices for SUD communication do not currently improve image outputs.</p><p>Simply adopting evidence-based, linguistically appropriate terminology failed to prevent stigmatizing images. Person-first language does not appear to translate into less harmful imagery in current AI systems. 
This finding is notable considering that the small semantic variation between &#x201C;in recovery&#x201D; and &#x201C;recovered&#x201D; changed the tone of images, suggesting idiosyncrasies within the training data [<xref ref-type="bibr" rid="ref29">29</xref>]. By contrast, images from the custom GPT informed by guidelines were consistently less stigmatizing. This suggests that concrete, detailed descriptions of specific preferred and nonpreferred depictions (eg, stay away from images of drugs or paraphernalia) more reliably generate appropriate images compared to language that merely connotes respectful and nonstigmatizing depictions (eg, person with a SUD). This also explains why the ChatGPT-generated prompts, which were much more detailed than our initial prompts and those used by prior studies on AI-generated images, modestly improved images.</p><p>However, it should be stressed that even images generated using the guidelines-based custom GPT would require refinement to yield usable visuals, underscoring the need for prompt engineering and user education. For example, guideline-adherent images may contribute to unrealistic portrayals of SUD and recovery if not appropriately contextualized. Further, the limited demographic representation observed raised concerns both before and after guidelines, with most nonguideline images depicting White men and most guideline images depicting Black women. It is unclear if initial images reflect the model treating White men as the default person with SUD based on training data versus model tuning to avoid stigmatizing images of minoritized and historically marginalized groups [<xref ref-type="bibr" rid="ref30">30</xref>]. This highlights the impact of ambiguity in image generation. 
Unlike text output, images require the model to assume the race and gender of the person depicted [<xref ref-type="bibr" rid="ref30">30</xref>].</p><p>Although image guidelines exist, there is little peer-reviewed evidence on the impact of images on drug-related stigma or what ideal images would be [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Yet the ability to generate near-infinite variations of images may help resolve a longstanding challenge for visual research by easing the creation of visual stimuli [<xref ref-type="bibr" rid="ref31">31</xref>]. Further work is needed to explore research applications for AI-generated images and to discern the ideal image composition for communication about SUD and recovery.</p><p>Our study has several limitations. First, we included images only from ChatGPT 4o that were generated in 2025. Different models may generate different images based on training data and model tuning. Second, the custom GPT was specific to the set of guidelines uploaded. Third, we did not consider the impact of AI-generated images on SUD perceptions, which merits further exploration. Finally, as images are not independent observations drawn from a population, statistical analyses were not performed to confirm if the differences between images were statistically significant. Strengths of this work include turning off ChatGPT&#x2019;s memory feature and the use of prompt variations, with each prompt used verbatim multiple times and by multiple members of the research team, increasing robustness. Additionally, we provide the full list of prompts and all resulting images in the supplementary materials to allow for reproducibility and for tracking model changes over time.</p></sec><sec id="s4-2"><title>Conclusion</title><p>Baseline generative AI images of SUD and recovery are highly stigmatizing but can be modestly improved with concrete instructions and application of existing guidelines.
Our findings highlight the importance of training health care and public health professionals on best practices for both communication about SUD and image-generation prompting.</p></sec></sec></body><back><ack><p>Generative AI (ChatGPT 4o) was used to generate the images analyzed.</p></ack><notes><sec><title>Funding</title><p>This research did not receive any specific grant from funding agencies in the public, commercial, or not-for-profit sectors.</p></sec><sec><title>Data Availability</title><p>All data generated or analyzed during this study are included in this published article and its supplementary information files.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">SUD</term><def><p>substance use disorder</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bharel</surname><given-names>M</given-names> </name><name name-style="western"><surname>Auerbach</surname><given-names>J</given-names> </name><name name-style="western"><surname>Nguyen</surname><given-names>V</given-names> </name><name name-style="western"><surname>DeSalvo</surname><given-names>KB</given-names> </name></person-group><article-title>Transforming public health practice with generative artificial intelligence</article-title><source>Health Aff (Millwood)</source><year>2024</year><month>06</month><day>1</day><volume>43</volume><issue>6</issue><fpage>776</fpage><lpage>782</lpage><pub-id pub-id-type="doi">10.1377/hlthaff.2024.00050</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name
name-style="western"><surname>Thomson</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>RJ</given-names> </name><name name-style="western"><surname>Matich</surname><given-names>P</given-names> </name></person-group><article-title>Generative visual AI in news organizations: challenges, opportunities, perceptions, and policies</article-title><source>Digital Journalism</source><year>2025</year><month>11</month><day>26</day><volume>13</volume><issue>10</issue><fpage>1693</fpage><lpage>1714</lpage><pub-id pub-id-type="doi">10.1080/21670811.2024.2331769</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>DeSalvo</surname><given-names>KB</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>YC</given-names> </name><name name-style="western"><surname>Harris</surname><given-names>A</given-names> </name><name name-style="western"><surname>Auerbach</surname><given-names>J</given-names> </name><name name-style="western"><surname>Koo</surname><given-names>D</given-names> </name><name name-style="western"><surname>O&#x2019;Carroll</surname><given-names>P</given-names> </name></person-group><article-title>Public health 3.0: a call to action for public health to meet the challenges of the 21st century</article-title><source>Prev Chronic Dis</source><year>2017</year><month>09</month><day>7</day><volume>14</volume><fpage>E78</fpage><pub-id pub-id-type="doi">10.5888/pcd14.170017</pub-id><pub-id pub-id-type="medline">28880837</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huston</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Kaminski</surname><given-names>N</given-names> </name></person-group><article-title>A 
picture worth a thousand words, created with one sentence: using artificial intelligence-created art to enhance medical education</article-title><source>ATS Sch</source><year>2023</year><month>06</month><volume>4</volume><issue>2</issue><fpage>145</fpage><lpage>151</lpage><pub-id pub-id-type="doi">10.34197/ats-scholar.2022-0141PS</pub-id><pub-id pub-id-type="medline">37533539</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Monteith</surname><given-names>S</given-names> </name><name name-style="western"><surname>Glenn</surname><given-names>T</given-names> </name><name name-style="western"><surname>Geddes</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Whybrow</surname><given-names>PC</given-names> </name><name name-style="western"><surname>Achtyes</surname><given-names>E</given-names> </name><name name-style="western"><surname>Bauer</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence and increasing misinformation</article-title><source>Br J Psychiatry</source><year>2024</year><month>02</month><volume>224</volume><issue>2</issue><fpage>33</fpage><lpage>35</lpage><pub-id pub-id-type="doi">10.1192/bjp.2023.136</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Flathers</surname><given-names>M</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>G</given-names> </name><name name-style="western"><surname>Wagner</surname><given-names>E</given-names> </name><name name-style="western"><surname>Fisher</surname><given-names>CE</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name></person-group><article-title>AI depictions of psychiatric diagnoses: a preliminary 
study of generative image outputs in Midjourney V.6 and DALL-E 3</article-title><source>BMJ Ment Health</source><year>2024</year><month>12</month><day>4</day><volume>27</volume><issue>1</issue><fpage>e301298</fpage><pub-id pub-id-type="doi">10.1136/bmjment-2024-301298</pub-id><pub-id pub-id-type="medline">39632121</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Papazova</surname><given-names>I</given-names> </name><name name-style="western"><surname>Hasan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Khorikian-Ghazari</surname><given-names>N</given-names> </name></person-group><article-title>Biased AI generated images of mental illness: does AI adopt our stigma?</article-title><source>Eur Arch Psychiatry Clin Neurosci</source><year>2025</year><month>12</month><volume>275</volume><issue>8</issue><fpage>2563</fpage><lpage>2565</lpage><pub-id pub-id-type="doi">10.1007/s00406-025-01998-x</pub-id><pub-id pub-id-type="medline">40202591</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Warren</surname><given-names>J</given-names> </name><name name-style="western"><surname>Weiss</surname><given-names>GM</given-names> </name><name name-style="western"><surname>Martinez</surname><given-names>F</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>Y</given-names> </name></person-group><article-title>Decoding fatphobia: examining anti-fat and pro-thin bias in AI-generated images</article-title><source>Findings of the Association for Computational Linguistics</source><fpage>4724</fpage><lpage>4736</lpage><pub-id 
pub-id-type="doi">10.18653/v1/2025.findings-naacl.266</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sutera</surname><given-names>P</given-names> </name><name name-style="western"><surname>Bhatia</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>T</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>A</given-names> </name><name name-style="western"><surname>Jagsi</surname><given-names>R</given-names> </name></person-group><article-title>Generative AI in medicine: pioneering progress or perpetuating historical inaccuracies? Cross-Sectional Study Evaluating Implicit Bias</article-title><source>JMIR AI</source><year>2025</year><month>06</month><day>24</day><volume>4</volume><issue>1</issue><fpage>e56891</fpage><pub-id pub-id-type="doi">10.2196/56891</pub-id><pub-id pub-id-type="medline">40605830</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tsai</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Kiang</surname><given-names>MV</given-names> </name><name name-style="western"><surname>Barnett</surname><given-names>ML</given-names> </name><etal/></person-group><article-title>Stigma as a fundamental hindrance to the United States opioid overdose crisis response</article-title><source>PLoS Med</source><year>2019</year><month>11</month><volume>16</volume><issue>11</issue><fpage>e1002969</fpage><pub-id pub-id-type="doi">10.1371/journal.pmed.1002969</pub-id><pub-id pub-id-type="medline">31770387</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Parish</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Feaster</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Pollack</surname><given-names>HA</given-names> </name><etal/></person-group><article-title>Healthcare provider stigma toward patients with substance use disorders</article-title><source>Addiction</source><year>2025</year><month>10</month><volume>120</volume><issue>10</issue><fpage>2005</fpage><lpage>2019</lpage><pub-id pub-id-type="doi">10.1111/add.70122</pub-id><pub-id pub-id-type="medline">40702596</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Werremeyer</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mosher</surname><given-names>S</given-names> </name><name name-style="western"><surname>Eukel</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Pharmacists&#x2019; stigma toward patients engaged in opioid misuse: when &#x201C;social distance&#x201D; does not mean disease prevention</article-title><source>Subst Abuse</source><year>2021</year><month>10</month><volume>42</volume><issue>4</issue><fpage>919</fpage><lpage>926</lpage><pub-id pub-id-type="doi">10.1080/08897077.2021.1900988</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Louie</surname><given-names>DL</given-names> </name><name name-style="western"><surname>Assefa</surname><given-names>MT</given-names> </name><name name-style="western"><surname>McGovern</surname><given-names>MP</given-names> </name></person-group><article-title>Attitudes of primary care physicians toward prescribing buprenorphine: a narrative review</article-title><source>BMC Fam 
Pract</source><year>2019</year><month>11</month><day>15</day><volume>20</volume><issue>1</issue><fpage>157</fpage><pub-id pub-id-type="doi">10.1186/s12875-019-1047-z</pub-id><pub-id pub-id-type="medline">31729957</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Norris</surname><given-names>EM</given-names> </name></person-group><article-title>The constructive use of images in medical teaching: a literature review</article-title><source>JRSM Short Rep</source><year>2012</year><month>05</month><volume>3</volume><issue>5</issue><fpage>33</fpage><pub-id pub-id-type="doi">10.1258/shorts.2012.011158</pub-id><pub-id pub-id-type="medline">22666530</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="web"><source>Alcohol Tobacco and other Drugs Council Tasmania. ATDC Image Guidelines</source><access-date>2025-07-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://atdc.org.au/wp-content/uploads/2024/01/ATDC-Image-Guidelines-ATOD.pdf">https://atdc.org.au/wp-content/uploads/2024/01/ATDC-Image-Guidelines-ATOD.pdf</ext-link></comment></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="web"><article-title>Images matter: a guide to reducing stigma in visual media</article-title><source>Denver Health CAM Academy</source><year>2025</year><access-date>2025-07-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.denverhealth.org/sites/default/files/2025-06/CAM2504-56-Images-Matter-Flyer_C.pdf">https://www.denverhealth.org/sites/default/files/2025-06/CAM2504-56-Images-Matter-Flyer_C.pdf</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><source>Empathy Lens Stigma in images: How to avoid stereotypes and humanize people who use 
drugs</source><access-date>2025-07-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://empathylens.org/wp-content/uploads/stigma-images-brochure.pdf">https://empathylens.org/wp-content/uploads/stigma-images-brochure.pdf</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hulsey</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zawislak</surname><given-names>K</given-names> </name><name name-style="western"><surname>Sawyer-Morris</surname><given-names>G</given-names> </name><name name-style="western"><surname>Earnshaw</surname><given-names>V</given-names> </name></person-group><article-title>Stigmatizing imagery for substance use disorders: a qualitative exploration</article-title><source>Health Justice</source><year>2023</year><month>07</month><day>4</day><volume>11</volume><issue>1</issue><fpage>28</fpage><pub-id pub-id-type="doi">10.1186/s40352-023-00229-6</pub-id><pub-id pub-id-type="medline">37402079</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><article-title>Creating a repository of non-stigmatizing visual images on substance use</article-title><source>The Ontario Network of People who Use Drugs, Public Health Ontario, Healthcare Human Factors</source><year>2024</year><month>03</month><access-date>2025-07-05</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.catie.ca/sites/default/files/2025-02/report-repository-non-stigmatizing-visual-images-substance-use-EN-2024.pdf">https://www.catie.ca/sites/default/files/2025-02/report-repository-non-stigmatizing-visual-images-substance-use-EN-2024.pdf</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Hom</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Heley</surname><given-names>K</given-names> </name><name name-style="western"><surname>Bandara</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kennedy-Hendricks</surname><given-names>A</given-names> </name></person-group><article-title>The impact of news photographs on drug-related stigma: a randomized message testing experiment in a National Sample of US Adults</article-title><source>Prev Med</source><year>2025</year><month>06</month><volume>195</volume><fpage>108293</fpage><pub-id pub-id-type="doi">10.1016/j.ypmed.2025.108293</pub-id><pub-id pub-id-type="medline">40311941</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ashford</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Curtis</surname><given-names>B</given-names> </name></person-group><article-title>Substance use, recovery, and linguistics: the impact of word choice on explicit and implicit bias</article-title><source>Drug Alcohol Depend</source><year>2018</year><month>08</month><day>1</day><volume>189</volume><fpage>131</fpage><lpage>138</lpage><pub-id pub-id-type="doi">10.1016/j.drugalcdep.2018.05.005</pub-id><pub-id pub-id-type="medline">29913324</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kelly</surname><given-names>JF</given-names> </name><name name-style="western"><surname>Westerhoff</surname><given-names>CM</given-names> </name></person-group><article-title>Does it matter how we refer to individuals with substance-related conditions? 
A randomized study of two commonly used terms</article-title><source>Int J Drug Policy</source><year>2010</year><month>05</month><volume>21</volume><issue>3</issue><fpage>202</fpage><lpage>207</lpage><pub-id pub-id-type="doi">10.1016/j.drugpo.2009.10.010</pub-id><pub-id pub-id-type="medline">20005692</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Creswell</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Clark</surname><given-names>VLP</given-names> </name></person-group><source>Designing and Conducting Mixed Methods Research</source><year>2018</year><edition>3</edition><publisher-name>SAGE Publications, Inc</publisher-name></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Yin</surname><given-names>RK</given-names> </name></person-group><source>Case Study Research and Applications: Design and Methods</source><year>2018</year><edition>6</edition><publisher-name>SAGE Publications, Inc</publisher-name></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Turner</surname><given-names>SF</given-names> </name><name name-style="western"><surname>Cardinal</surname><given-names>LB</given-names> </name><name name-style="western"><surname>Burton</surname><given-names>RM</given-names> </name></person-group><article-title>Research design for mixed methods: a triangulation-based framework and roadmap</article-title><source>Organizational Research Methods</source><year>2017</year><volume>20</volume><issue>2</issue><fpage>243</fpage><lpage>267</lpage><pub-id pub-id-type="doi">10.1177/1094428115610808</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bessette</surname><given-names>LG</given-names> </name><name name-style="western"><surname>Hauc</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Danckers</surname><given-names>H</given-names> </name><name name-style="western"><surname>Atayde</surname><given-names>A</given-names> </name><name name-style="western"><surname>Saitz</surname><given-names>R</given-names> </name></person-group><article-title>The associated press stylebook changes and the use of addiction-related stigmatizing terms in news media</article-title><source>Subst Abus</source><year>2022</year><volume>43</volume><issue>1</issue><fpage>127</fpage><lpage>130</lpage><pub-id pub-id-type="doi">10.1080/08897077.2020.1748167</pub-id><pub-id pub-id-type="medline">32348190</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Landis</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Koch</surname><given-names>GG</given-names> </name></person-group><article-title>The measurement of observer agreement for categorical data</article-title><source>Biometrics</source><year>1977</year><month>03</month><volume>33</volume><issue>1</issue><fpage>159</fpage><pub-id pub-id-type="doi">10.2307/2529310</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hom</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Heley</surname><given-names>K</given-names> </name><name name-style="western"><surname>Laestadius</surname><given-names>LI</given-names> </name></person-group><article-title>When words and images diverge: challenges of AI-generated drug use representations</article-title><source>Int 
J Drug Policy</source><year>2026</year><month>01</month><volume>147</volume><fpage>105104</fpage><pub-id pub-id-type="doi">10.1016/j.drugpo.2025.105104</pub-id><pub-id pub-id-type="medline">41351926</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Best</surname><given-names>D</given-names> </name><name name-style="western"><surname>Andersson</surname><given-names>C</given-names> </name><name name-style="western"><surname>Irving</surname><given-names>J</given-names> </name><name name-style="western"><surname>Edwards</surname><given-names>M</given-names> </name></person-group><article-title>Recovery identity and wellbeing: is it better to be &#x2018;recovered&#x2019; or &#x2018;in recovery&#x2019;?</article-title><source>J Groups Addict Recover</source><year>2017</year><month>01</month><day>2</day><volume>12</volume><issue>1</issue><fpage>27</fpage><lpage>36</lpage><pub-id pub-id-type="doi">10.1080/1556035X.2016.1272071</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Ananya</surname></name></person-group><article-title>AI image generators often give racist and sexist results: can they be fixed?</article-title><source>Nature New Biol</source><year>2024</year><month>03</month><day>28</day><access-date>2026-02-09</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.nature.com/articles/d41586-024-00674-9">https://www.nature.com/articles/d41586-024-00674-9</ext-link></comment></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Heley</surname><given-names>K</given-names> </name><name name-style="western"><surname>Gaysynsky</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>King</surname><given-names>AJ</given-names> </name></person-group><article-title>Missing the bigger picture: the need for more research on visual health misinformation</article-title><source>Sci Commun</source><year>2022</year><month>08</month><volume>44</volume><issue>4</issue><fpage>514</fpage><lpage>527</lpage><pub-id pub-id-type="doi">10.1177/10755470221113833</pub-id><pub-id pub-id-type="medline">36082150</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Prompts and images without guidelines.</p><media xlink:href="ai_v5i1e81977_app1.docx" xlink:title="DOCX File, 6395 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Prompts and images with guidelines.</p><media xlink:href="ai_v5i1e81977_app2.docx" xlink:title="DOCX File, 37761 KB"/></supplementary-material></app-group></back></article>