<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR AI</journal-id>
      <journal-title>JMIR AI</journal-title>
      <issn pub-type="epub">2817-1705</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v4i1e68144</article-id>
      <article-id pub-id-type="pmid">40388838</article-id>
      <article-id pub-id-type="doi">10.2196/68144</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>ChatGPT-4–Driven Liver Ultrasound Radiomics Analysis: Diagnostic Value and Drawbacks in a Comparative Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>El Emam</surname>
            <given-names>Khaled</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Montin</surname>
            <given-names>Eros</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Nanthasamroeng</surname>
            <given-names>Natthapong</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Sultan</surname>
            <given-names>Laith R</given-names>
          </name>
          <degrees>MD, MBMI</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Radiology</institution>
            <institution>Children's Hospital of Philadelphia</institution>
            <addr-line>734 Schuylkill Ave</addr-line>
            <addr-line>Philadelphia, PA, 19146</addr-line>
            <country>United States</country>
            <phone>1 267 425 4143</phone>
            <email>sultanl@chop.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2632-4164</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Venkatakrishna</surname>
            <given-names>Shyam Sunder B</given-names>
          </name>
          <degrees>MBBS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1803-1740</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Anupindi</surname>
            <given-names>Sudha A</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7784-3108</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Andronikou</surname>
            <given-names>Savvas</given-names>
          </name>
          <degrees>MBBch, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1015-9790</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Acord</surname>
            <given-names>Michael R</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8575-0983</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Otero</surname>
            <given-names>Hansel J</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4626-0732</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Darge</surname>
            <given-names>Kassa</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8989-6485</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Sehgal</surname>
            <given-names>Chandra M</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8811-1930</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Holmes</surname>
            <given-names>John H</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2167-3602</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Radiology</institution>
        <institution>Children's Hospital of Philadelphia</institution>
        <addr-line>Philadelphia, PA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Radiology</institution>
        <institution>University of Pennsylvania</institution>
        <addr-line>Philadelphia</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Biostatistics, Epidemiology and Informatics</institution>
        <institution>University of Pennsylvania</institution>
        <addr-line>Philadelphia, PA</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Laith R Sultan <email>sultanl@chop.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>30</day>
        <month>6</month>
        <year>2025</year>
      </pub-date>
      <volume>4</volume>
      <elocation-id>e68144</elocation-id>
      <history>
        <date date-type="received">
          <day>29</day>
          <month>10</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>15</day>
          <month>12</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>28</day>
          <month>2</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>18</day>
          <month>5</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Laith R Sultan, Shyam Sunder B Venkatakrishna, Sudha A Anupindi, Savvas Andronikou, Michael R Acord, Hansel J Otero, Kassa Darge, Chandra M Sehgal, John H Holmes. Originally published in JMIR AI (https://ai.jmir.org), 30.06.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on https://www.ai.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://ai.jmir.org/2025/1/e68144" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) is transforming medical imaging, with large language models such as ChatGPT-4 emerging as potential tools for automated image interpretation. While AI-driven radiomics has shown promise in diagnostic imaging, the efficacy of ChatGPT-4 in liver ultrasound analysis remains largely unexamined.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to evaluate the capability of ChatGPT-4 in liver ultrasound radiomics, specifically its ability to differentiate fibrosis, steatosis, and normal liver tissue, compared with conventional image analysis software.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Seventy grayscale ultrasound images from a preclinical liver disease model, including fibrosis (n=31), fatty liver (n=18), and normal liver (n=21), were analyzed. ChatGPT-4 extracted texture features, which were compared with those obtained using interactive data language (IDL), a traditional image analysis software. One-way ANOVA was used to identify statistically significant features differentiating liver conditions, and logistic regression models were used to assess diagnostic performance.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>ChatGPT-4 extracted 9 key textural features—echo intensity, heterogeneity, skewness, kurtosis, contrast, homogeneity, dissimilarity, angular second momentum, and entropy—all of which significantly differed across liver conditions (<italic>P</italic>&lt;.05). Among individual features, echo intensity achieved the highest <italic>F</italic><sub>1</sub>-score (0.85). When combined, ChatGPT-4 attained 76% accuracy and 83% sensitivity in classifying liver disease. Receiver operating characteristic analysis demonstrated strong discriminatory performance, with area under the curve values of 0.75 for fibrosis, 0.87 for normal liver, and 0.97 for steatosis. Compared with IDL image analysis software, ChatGPT-4 exhibited slightly lower sensitivity (0.83 vs 0.89) but showed moderate correlation (<italic>r</italic>=0.68, <italic>P</italic>&lt;.001) with IDL-derived features. However, it significantly outperformed IDL in processing efficiency, reducing analysis time by 40%, and highlighting its potential for high throughput radiomic analysis.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Despite slightly lower sensitivity than IDL, ChatGPT-4 demonstrated high feasibility for ultrasound radiomics, offering faster processing, high-throughput analysis, and automated multi-image evaluation. These findings support its potential integration into AI-driven imaging workflows, with further refinements needed to enhance feature reproducibility and diagnostic accuracy.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>ChatGPT-4</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>large language models</kwd>
        <kwd>radiomics</kwd>
        <kwd>ultrasound imaging</kwd>
        <kwd>quantitative image analysis</kwd>
        <kwd>liver disease</kwd>
        <kwd>radiology workflow</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>In recent years, advancements in artificial intelligence (AI) have transformed various fields, and one notable application is in the realm of medical imaging [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref6">6</xref>]. AI holds significant potential in revolutionizing the field of medical imaging, as it can automate numerous tasks and even surpass human abilities in specific areas, whether it be in diagnostic or interventional applications [<xref ref-type="bibr" rid="ref7">7</xref>]. Integrating AI with ultrasound imaging is particularly compelling. Unlike other imaging modalities, ultrasound relies heavily on human operators [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. This dependence on human expertise presents unique challenges, especially with the growing use of portable ultrasound devices. These devices are increasingly used by a diverse range of health care providers, including nonradiologists, who may have varying levels of training and experience [<xref ref-type="bibr" rid="ref10">10</xref>]. AI algorithms offer a powerful solution to mitigate the challenges associated with operator dependency in ultrasound imaging. These algorithms can play a crucial role in the automated detection of anomalies and significant findings, providing not only descriptive analysis but also valuable diagnostic guidance [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref13">13</xref>]. This capability is particularly beneficial for less experienced operators or in situations where expert radiologists are not readily available in regions with limited medical resources. The integration of AI in ultrasound imaging can lead to more accurate and efficient diagnostic processes, reducing the likelihood of human error and improving patient outcomes [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref17">17</xref>].</p>
      <p>ChatGPT is an advanced and powerful AI natural language processing model developed by OpenAI and was designed to comprehend and generate human-like text responses [<xref ref-type="bibr" rid="ref18">18</xref>]. Having been extensively trained on a diverse corpus of data, ChatGPT has cultivated the capacity to grasp context, acquire knowledge from examples, and produce cohesive responses [<xref ref-type="bibr" rid="ref19">19</xref>]. Consequently, it has evolved into a versatile tool applicable to a wide array of uses, including health care and medical imaging [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. In health care, its capacity to process and interpret vast amounts of information can support medical diagnostics, patient communication, and research. The latest version, ChatGPT-4, expands its ability to multimodal interactions, including image processing and potential capabilities in audio and video formats [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>]. This enhancement is especially beneficial in health care, where it can analyze medical imagery, assist in creating educational materials, and offer visually descriptive assistance in patient care. By integrating advanced image analysis and generation, ChatGPT-4 stands poised to transform how AI supports health care professionals, offering tools for more accurate diagnoses, treatment planning, and patient engagement through rich, interactive media.</p>
      <p>In this study, we explore the potential of ChatGPT-4 in ultrasound imaging, particularly its capabilities in radiomics analysis for detailed tissue texture characterization. We focus on using ChatGPT-4–based radiomics to detect 3 distinct liver tissue types—normal, fibrotic, and fatty liver—using ultrasound images. To address challenges related to clinical data security, patient privacy, and ethical compliance, the liver ultrasound images in our study were sourced from an animal model. We then compared the findings generated by ChatGPT-4 with those obtained from conventional image analysis software. Our exploration highlights the potential of ChatGPT-4 to enhance research efforts and future clinical applications by improving the accuracy of quantitative image analysis.</p>
      <p>Beyond radiomics analysis, we evaluated ChatGPT-4 as a tool for distinguishing normal from abnormal cases based on imaging findings. We aimed to demonstrate its capability as a supportive tool in clinical settings. Such a tool could significantly reduce the workload of radiologists by efficiently filtering out normal cases, allowing them to focus their expertise on more complex and abnormal cases. This expanded exploration highlights the promising role of ChatGPT-4 in enhancing diagnostic accuracy and supporting clinical decision-making in liver disease detection.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Image Data Acquisition</title>
        <p>Seventy B-mode grayscale ultrasound images acquired from validated rat liver disease models [<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref32">32</xref>] were used for analysis. The images were distributed across 3 categories of liver health: fibrosis (n=31), steatosis (fatty liver) (n=18), and normal (n=21). To maintain consistency and reliability in the analysis, the imaging parameters were standardized, including transducer frequency, gain settings, imaging depth, focus, and dynamic range. These standardizations ensured that the liver tissue’s echogenicity and overall image quality were consistent across all samples, allowing for accurate comparisons between the different health states. Additionally, each analysis focused on a single image depicting a section of the right lobe of the liver. The right lobe was chosen due to its larger size and easier accessibility, which provided a more representative and consistent area for imaging and subsequent histopathological validation. The liver pathology in these images was validated with histopathology, further ensuring the accuracy of the ultrasound-based categorization.</p>
      </sec>
      <sec>
        <title>Ultrasound Image Analysis by ChatGPT-4</title>
        <sec>
          <title>Overview</title>
          <p>We leveraged the advanced capabilities of ChatGPT-4 for radiomics analysis of ultrasound images. ChatGPT-4 was used to select regions of interest (ROIs), extract radiomic features, and classify liver disease conditions. These critical steps are depicted in <xref rid="figure1" ref-type="fig">Figure 1</xref>.</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>ChatGPT-4–assisted liver ultrasound image radiomics analysis workflow. The image illustrates the stepwise process of liver ultrasound texture analysis using ChatGPT-4. The process begins with uploading the image and preparation for analysis (query 1), where ChatGPT-4 performs texture analysis based on a selected ROI. In query 2, the user verifies and corrects the ROI selection. The ChatGPT-4 interface allows the user to refine the ROI to ensure accurate analysis. Once confirmed, the system proceeds to apply the same process to a series of images (query 3). Feature extraction details the analysis outputs, including texture metrics such as mean, variance, skewness, kurtosis, energy, and entropy, which provide insights into pixel intensity distribution and texture uniformity within the selected ROI. Figure prepared by Brittany Bennett, CMI. ROI: region of interest.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Region of Interest Delineation</title>
          <p>The first critical step involved selecting a region of interest (ROI) within the liver tissue depicted in each ultrasound image (<xref rid="figure1" ref-type="fig">Figures 1</xref>B and 1C). ROIs were automatically defined using ChatGPT-4’s advanced algorithms. Upon receiving the query, ChatGPT-4 initially proposed an ROI based on its automated analysis of the image, highlighting a region that it determined to be representative of the liver parenchyma. Users then refined these suggestions to ensure alignment with clinical standards, making adjustments as necessary to ensure that the selected area was optimal for analysis, meticulously excluding artifacts such as vascular structures, acoustic shadows, and reverberation. This interactive process allowed for fine-tuning of the ROI, combining the computational efficiency of ChatGPT-4 with the expert judgment of the user. Once the ROIs were verified for accuracy in an analyzed image, they were replicated across 10 subsequent ultrasound images, which were then uploaded for subsequent radiomics analysis using a batch processing approach (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Having liver images captured consistently in the same plane and region facilitated the reproducibility of ROI placement across the images. This method significantly enhanced the efficiency of our analysis, allowing for a more comprehensive assessment of liver tissue samples.</p>
        </sec>
        <sec>
          <title>Feature Extraction</title>
          <p>Feature extraction was conducted in batches of 10 ultrasound images, the maximum allowed by ChatGPT-4, requiring multiple sessions to analyze all 70 cases. However, conducting analyses across different sessions introduced variability—some features were occasionally omitted, while others appeared inconsistently across sessions. To mitigate this session-dependent variability and ensure consistency in feature extraction, we implemented a standardized approach. At the beginning of each session, we carefully refined the prompts provided to ChatGPT-4 to align with previously extracted features (Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Missing features were explicitly requested, and any inconsistently appearing features were excluded. If ChatGPT-4 returned incomplete or inconsistent features, prompts were reissued or clarified until the correct output was obtained. Our final analysis included only features that were consistently and reliably extracted across all sessions. This approach minimizes session-to-session variation while maintaining reproducibility across users.</p>
          <p>ChatGPT-4 extracted a comprehensive set of radiomic features to characterize liver tissue texture [<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref35">35</xref>]. These included first-order statistics and second-order texture features. First-order statistics are quantitative measures such as mean intensity, variance (heterogeneity), skewness, and kurtosis, reflecting pixel intensity distribution. Second-order texture features are derived from the gray-level co-occurrence matrix (GLCM), and these features include contrast, homogeneity, entropy, and angular second momentum (ASM), providing deep insights into spatial relationships and textural heterogeneity within the ROI.</p>
        </sec>
        <sec>
          <title>Machine Learning for Feature Model Assessment</title>
          <p>The extracted radiomic features were used to develop a diagnostic model based on logistic regression, a method selected for its interpretability and clinical relevance. The model was configured with L2 regularization, and the regularization strength parameter (C) was optimized through grid search over a predefined range [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. The liblinear solver was used for its suitability with small datasets, and the maximum number of iterations was set to 1000 to ensure model convergence. The dataset was divided into training (60%), testing (20%), and validation (20%) subsets using stratified random sampling to maintain a balanced representation across liver disease categories (Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). Hyperparameter tuning was performed using a grid search to optimize model performance. Specifically, the regularization strength parameter (C) in the logistic regression model was adjusted to balance model fit and prevent overfitting [<xref ref-type="bibr" rid="ref37">37</xref>]. A range of C values (eg, 0.001 to 100) was evaluated, and the optimal configuration was selected based on performance on the test set. To maintain methodological rigor, the test set was used exclusively during hyperparameter tuning, while the validation set was reserved for final model evaluation. The 3-way split ensured an unbiased assessment of model generalizability. Key metrics, including accuracy, sensitivity, specificity, and the area under the receiver operating characteristic (ROC) curve (AUC), were used to quantify diagnostic precision.</p>
        </sec>
        <sec>
          <title>Ultrasound Image Analysis by Interactive Data Language–Based Software</title>
          <p>Concurrently, the same ultrasound images were analyzed using an established interactive data language (IDL)–based tool designed for image analysis [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. For this analysis, ROIs within the liver were manually defined using a specialized tool, which ensured the precise selection of the target areas based on the same selection criteria mentioned earlier. ROI delineation was performed manually by expert users, ensuring the precise inclusion of clinically relevant areas and the exclusion of artifacts. The ROIs were selected to resemble the same areas selected using ChatGPT-4. Following that, texture features describing the first-order and second-order histograms were extracted from ROIs. The same feature extraction and logistic regression methodology described above was applied, allowing for a direct comparison of the 2 approaches.</p>
        </sec>
        <sec>
          <title>Evaluating ChatGPT-4 for Imaging Findings–Based Diagnosis</title>
          <p>To explore the potential of ChatGPT-4 as a tool for distinguishing normal from abnormal liver cases, we conducted an experiment involving liver ultrasound images representing various conditions. We uploaded these images to ChatGPT-4 and tasked it with providing detailed descriptions of the findings and possible diagnoses for each image (<xref rid="figure2" ref-type="fig">Figure 2</xref>). ChatGPT-4’s output included comprehensive imaging findings that described the characteristics of the liver tissue and suggested potential diagnoses based on these observations.</p>
          <p>Following this, we compared the diagnoses provided by ChatGPT-4 with the actual diagnoses to assess its diagnostic performance in identifying liver pathology. This involved calculating metrics such as sensitivity, specificity, and overall accuracy to determine how well ChatGPT-4 could identify normal and abnormal cases.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>ChatGPT-4–assisted liver ultrasound image diagnosis and report generation workflow. The figure depicts the workflow of using ChatGPT-4 for generating liver ultrasound image findings, possible diagnoses, and detailed reports. In query 1, images are uploaded for analysis, and ChatGPT-4 provides initial findings and potential diagnoses based on visual characteristics, such as liver parenchyma echotexture and the presence of lesions. In query 2, ChatGPT-4 generates a detailed report and impression, summarizing the clinical interpretation of the ultrasound images. Each image is examined for hepatic abnormalities, including potential cysts, signs of fibrosis, or normal liver architecture, with impressions supporting clinical correlation or further diagnostic imaging recommendations. This stepwise approach demonstrates ChatGPT-4’s ability to assist in diagnostic interpretations and report generation for liver ultrasound studies, streamlining clinical workflows and enhancing diagnostic accuracy.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>To interpret the differences in ultrasound texture features among the 3 liver health categories, we calculated the mean values and SEs. A 1-way ANOVA was conducted to identify any statistical differences across the study groups.</p>
        <p>When comparing 2 groups, the Shapiro-Wilk test was used to assess normality. If the data did not meet the normality assumption, the Mann-Whitney test was applied to determine significance; otherwise, statistical significance was evaluated using 2-tailed paired Student <italic>t</italic> tests, with a threshold of <italic>P</italic>&lt;.05.</p>
        <p>The diagnostic performance of individual and combined features, including sensitivity, specificity, accuracy, and <italic>F</italic><sub>1</sub>-score, was calculated. To support the visualization of multiclass separability, an additional exploratory analysis was performed using a decision tree classifier. A one-vs-rest classification scheme was used to generate ROC curves and compute AUC values for each class: fibrosis, normal, and steatosis. In addition, the intraclass correlation coefficient (ICC) analysis was performed between 2 observers to assess the reproducibility of ChatGPT-derived features. All analyses were performed using MedCalc software (version 19.0.5; MedCalc Software Ltd).</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Multiclass Liver Disease Classification by ChatGPT-4–Based Ultrasound Radiomics</title>
        <sec>
          <title>Identification of Key Features</title>
          <p>The ultrasound radiomics data processed by ChatGPT-4 has provided significant insights into the textural characteristics associated with various liver diseases (<xref rid="figure3" ref-type="fig">Figure 3</xref>). An ANOVA analysis identified 9 key textural features (from 10 features studied)—echo intensity, heterogeneity, skewness, kurtosis, contrast, homogeneity, dissimilarity, ASM, and entropy—as significantly varying among different liver conditions.</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>This figure presents the distribution of normalized texture features extracted from liver ultrasound images using ChatGPT-4, comparing 3 diagnostic groups: fibrosis, normal liver, and steatosis. (A) Plot 1 displays first-order histogram features, including echointensity, heterogeneity, kurtosis, and skewness. Fibrotic livers exhibit the highest echogenicity, followed by steatotic livers, both exceeding normal liver levels. Additionally, fibrosis is characterized by increased heterogeneity, whereas steatosis appears more homogeneous. (B) Plot 2 illustrates higher-order texture features, including entropy, contrast, dissimilarity, homogeneity, and ASM. Fibrosis is associated with greater contrast and dissimilarity, alongside reduced ASM, reflecting increased microstructural irregularity. Conversely, normal liver tissue demonstrates higher ASM and homogeneity, indicating a more uniform texture. ASM: angular second moment.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Predictive Performance of Individual Features</title>
          <p>Further analysis revealed varying degrees of accuracy, sensitivity, and specificity for the identified imaging features across different metrics (<xref ref-type="table" rid="table1">Table 1</xref>). The accuracy of these features ranged from 0.48 to 0.62, with echointensity and entropy exhibiting the highest accuracy at 0.62. Specificity and sensitivity also varied, with echointensity showing a high specificity of 0.62 and entropy demonstrating a lower specificity at 0.42. Heterogeneity and skewness presented moderate accuracy levels at 0.57, with heterogeneity having slightly higher sensitivity. Energy stood out for its specificity at 0.66, while ASM, despite having the lowest sensitivity at 0.33, exhibited the highest specificity at 0.67. When these features were combined, the overall accuracy improved to 0.76, with a sensitivity of 0.83. An analysis of feature-wise <italic>F</italic><sub>1</sub>-scores also revealed variability in their predictive contributions (<xref ref-type="table" rid="table1">Table 1</xref>). Echo intensity also exhibited the strongest performance (<italic>F</italic><sub>1</sub>-score=0.85), while heterogeneity followed with an <italic>F</italic><sub>1</sub>-score of 0.67. Notably, the combined feature approach achieved an <italic>F</italic><sub>1</sub>-score of 0.77, emphasizing the advantage of integrating multiple features, particularly weak ones.</p>
          <table-wrap position="float" id="table1">
            <label>Table 1</label>
            <caption>
              <p>Performance metrics for key radiomic features in multiclass liver disease classification. Results are derived from logistic regression models configured as described above.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="200"/>
              <col width="200"/>
              <col width="200"/>
              <col width="200"/>
              <col width="200"/>
              <thead>
                <tr valign="top">
                  <td>Feature</td>
                  <td>Accuracy</td>
                  <td>Sensitivity</td>
                  <td>Specificity</td>
                  <td><italic>F</italic><sub>1</sub>-score</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Echo-intensity</td>
                  <td>0.62</td>
                  <td>0.56</td>
                  <td>0.62</td>
                  <td>0.85</td>
                </tr>
                <tr valign="top">
                  <td>Heterogeneity</td>
                  <td>0.57</td>
                  <td>0.50</td>
                  <td>0.55</td>
                  <td>0.67</td>
                </tr>
                <tr valign="top">
                  <td>Skewness</td>
                  <td>0.57</td>
                  <td>0.47</td>
                  <td>0.62</td>
                  <td>0.63</td>
                </tr>
                <tr valign="top">
                  <td>Kurtosis</td>
                  <td>0.48</td>
                  <td>0.36</td>
                  <td>0.64</td>
                  <td>0.63</td>
                </tr>
                <tr valign="top">
                  <td>ASM</td>
                  <td>0.48</td>
                  <td>0.33</td>
                  <td>0.67</td>
                  <td>0.42</td>
                </tr>
                <tr valign="top">
                  <td>Energy</td>
                  <td>0.57</td>
                  <td>0.47</td>
                  <td>0.66</td>
                  <td>0.42</td>
                </tr>
                <tr valign="top">
                  <td>Contrast</td>
                  <td>0.52</td>
                  <td>0.41</td>
                  <td>0.55</td>
                  <td>0.58</td>
                </tr>
                <tr valign="top">
                  <td>Dissimilarity</td>
                  <td>0.52</td>
                  <td>0.41</td>
                  <td>0.55</td>
                  <td>0.58</td>
                </tr>
                <tr valign="top">
                  <td>Entropy</td>
                  <td>0.62</td>
                  <td>0.60</td>
                  <td>0.42</td>
                  <td>0.56</td>
                </tr>
                <tr valign="top">
                  <td>Homogeneity</td>
                  <td>0.52</td>
                  <td>0.41</td>
                  <td>0.60</td>
                  <td>0.54</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <p>ROC curve analysis for features combined using a decision tree classifier showed the following AUC values: 0.75 for fibrosis, 0.87 for normal, and 0.97 for steatosis (<xref rid="figure4" ref-type="fig">Figure 4</xref>). ROC comparison for individual features is shown in <xref ref-type="table" rid="table2">Table 2</xref> (Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The comparison showed that echo intensity and heterogeneity were highest in fibrosis (0.91 and 0.86, respectively), suggesting increased structural disruption compared with steatosis and normal liver. ASM was highest in steatosis (0.88), reflecting greater textural uniformity, while fibrosis had the lowest value, indicative of higher heterogeneity. Contrast and dissimilarity, measures of local intensity variation, were most pronounced in fibrosis (0.79 and 0.73, respectively) and lowest in normal liver, reinforcing fibrosis’s greater textural complexity. Homogeneity and energy, which indicate texture smoothness and uniformity, were highest in normal liver (0.87 and 0.58, respectively), reflecting well-organized tissue architecture, and lowest in fibrosis, further supporting its structural disorganization.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>This ROC curve illustrates the diagnostic performance of ChatGPT-4 in classifying liver conditions using a decision tree model based on combined features. The model’s performance is evaluated across 3 classes: Fibrosis (the ROC curve for fibrosis shows an AUC [area under the ROC curve] of 0.75, indicating moderate diagnostic accuracy), Normal (the ROC curve for the normal class shows an AUC of 0.87, suggesting high diagnostic accuracy), and Steatosis (the ROC curve for steatosis shows an AUC of 0.97, indicating excellent diagnostic accuracy). The black dashed line represents a random guess with an AUC of 0.50. This figure demonstrates the capability of ChatGPT-4 to distinguish between different liver conditions with varying degrees of accuracy. ROC: receiver operating characteristic.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>This table presents the area under the receiver operating characteristic (ROC) curve (AUC) values for radiomic features extracted from liver ultrasound images using ChatGPT-4, assessing their ability to differentiate fibrosis, steatosis, and normal liver tissue. These findings demonstrate the feasibility of ChatGPT-4–assisted ultrasound radiomics for noninvasive liver disease characterization.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="300"/>
              <col width="240"/>
              <col width="230"/>
              <col width="230"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Fibrosis</td>
                  <td>Steatosis</td>
                  <td>Normal</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Echo intensity</td>
                  <td>0.91</td>
                  <td>0.39</td>
                  <td>0.12</td>
                </tr>
                <tr valign="top">
                  <td>Heterogeneity</td>
                  <td>0.86</td>
                  <td>0.33</td>
                  <td>0.23</td>
                </tr>
                <tr valign="top">
                  <td>Kurtosis</td>
                  <td>0.22</td>
                  <td>0.51</td>
                  <td>0.82</td>
                </tr>
                <tr valign="top">
                  <td>Skewness</td>
                  <td>0.22</td>
                  <td>0.45</td>
                  <td>0.87</td>
                </tr>
                <tr valign="top">
                  <td>Angular second momentum</td>
                  <td>0.11</td>
                  <td>0.88</td>
                  <td>0.59</td>
                </tr>
                <tr valign="top">
                  <td>Correlation</td>
                  <td>0.53</td>
                  <td>0.33</td>
                  <td>0.61</td>
                </tr>
                <tr valign="top">
                  <td>Dissimilarity</td>
                  <td>0.73</td>
                  <td>0.39</td>
                  <td>0.33</td>
                </tr>
                <tr valign="top">
                  <td>Contrast</td>
                  <td>0.79</td>
                  <td>0.42</td>
                  <td>0.22</td>
                </tr>
                <tr valign="top">
                  <td>Entropy</td>
                  <td>0.44</td>
                  <td>0.90</td>
                  <td>0.21</td>
                </tr>
                <tr valign="top">
                  <td>Energy</td>
                  <td>0.49</td>
                  <td>0.43</td>
                  <td>0.58</td>
                </tr>
                <tr valign="top">
                  <td>Homogeneity</td>
                  <td>0.20</td>
                  <td>0.49</td>
                  <td>0.87</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
        <sec>
          <title>Reproducibility and Reliability</title>
          <p>To assess the reproducibility of ChatGPT-4 outputs across users, 2 independent observers used the same ChatGPT-4–assisted workflow to select ROIs and extract radiomic features from the same ultrasound images. Both were trained physicians with clinical and research expertise in liver ultrasound. The ICC was calculated across the extracted radiomic features to quantify consistency. The results demonstrated high reproducibility, with most features exceeding an ICC of 0.8; energy (ICC=0.96), correlation (ICC=0.92), and echo intensity (ICC=0.88) showed excellent agreement between observers (<xref ref-type="table" rid="table3">Table 3</xref>). Entropy (ICC=0.81) and homogeneity (ICC=0.81) also indicated strong reliability, suggesting consistent feature extraction across different evaluators. Skewness (ICC=0.6) exhibited moderate agreement, while ASM showed the lowest ICC (ICC=0.25), indicating poor reproducibility for this metric.</p>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>This table presents the intraclass correlation coefficients (ICC) assessing interobserver agreement for key radiomic features extracted from liver ultrasound images using ChatGPT-4. These results indicate strong to excellent reliability for most features, supporting the robustness of ChatGPT-4–assisted radiomic analysis in liver ultrasound imaging.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="500"/>
              <col width="500"/>
              <thead>
                <tr valign="top">
                  <td>Feature</td>
                  <td>ICC</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Echo-intensity</td>
                  <td>0.88</td>
                </tr>
                <tr valign="top">
                  <td>Heterogeneity</td>
                  <td>0.90</td>
                </tr>
                <tr valign="top">
                  <td>Kurtosis</td>
                  <td>0.39</td>
                </tr>
                <tr valign="top">
                  <td>Skewness</td>
                  <td>0.60</td>
                </tr>
                <tr valign="top">
                  <td>Angular second momentum</td>
                  <td>0.25</td>
                </tr>
                <tr valign="top">
                  <td>Correlation</td>
                  <td>0.92</td>
                </tr>
                <tr valign="top">
                  <td>Dissimilarity</td>
                  <td>0.78</td>
                </tr>
                <tr valign="top">
                  <td>Contrast</td>
                  <td>0.89</td>
                </tr>
                <tr valign="top">
                  <td>Entropy</td>
                  <td>0.81</td>
                </tr>
                <tr valign="top">
                  <td>Energy</td>
                  <td>0.96</td>
                </tr>
                <tr valign="top">
                  <td>Homogeneity</td>
                  <td>0.81</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
        <sec>
          <title>Binary Classification of Healthy Liver Versus Steatosis and Fibrosis Using ChatGPT-4 Ultrasound Radiomics</title>
          <p>Significant distinctions were observed between normal liver and diseased conditions, particularly in 8 out of 10 analyzed features. For the binary comparison between normal and liver diseases (steatosis and fibrosis), 8 features showed significant differences (<italic>P</italic>&lt;.05): echo-intensity (27.47 vs 50.47), heterogeneity (423.96 vs 687.17), skewness (0.95 vs 1.86), kurtosis (1.34 vs 5.24), energy (0.18 vs 0.22), contrast (30.65 vs 57.37), ASM (0.003 vs 0.001), and homogeneity (0.20 vs 0.38), for normal versus liver disease, respectively.</p>
          <p>Comparing normal to fibrosis revealed significant differences in 8 features (<italic>P</italic>&lt;.05): echo-intensity, heterogeneity, skewness, kurtosis, entropy, contrast, homogeneity, and correlation. For normal versus steatosis, 7 features showed significant differences: echo-intensity, entropy, skewness, kurtosis, homogeneity, ASM, and energy. The mean values for these features are summarized in <xref ref-type="table" rid="table4">Table 4</xref>.</p>
          <table-wrap position="float" id="table4">
            <label>Table 4</label>
            <caption>
              <p>This table illustrates the differences between liver disease groups (normal, steatosis, and fibrosis) by showing the mean values of features extracted through ChatGPT-4–based radiomics analysis. The features include echo intensity, heterogeneity, skewness, kurtosis, contrast, homogeneity, dissimilarity, angular second momentum (ASM), and entropy. The mean values for these features provide insights into the distinct textural characteristics associated with each liver disease group.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="100"/>
              <col width="100"/>
              <col width="100"/>
              <col width="80"/>
              <col width="90"/>
              <col width="90"/>
              <col width="80"/>
              <col width="90"/>
              <col width="100"/>
              <col width="100"/>
              <col width="70"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Echo-intensity, mean (SD)</td>
                  <td>Heterogeneity, mean (SD)</td>
                  <td>Entropy, mean (SD)</td>
                  <td>Skewness, mean (SD)</td>
                  <td>Kurtosis, mean (SD)</td>
                  <td>Energy, mean (SD)</td>
                  <td>Contrast, mean (SD)</td>
                  <td>Dissimilarity, mean (SD)</td>
                  <td>Homogeneity, mean (SD)</td>
                  <td>ASM, mean (SD)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Liver disease (fibrosis and steatosis)</td>
                  <td>50.47 (2.34)</td>
                  <td>687.17 (57.19)</td>
                  <td>8.91 (0.28)</td>
                  <td>0.95 (0.05)</td>
                  <td>1.34 (0.20)</td>
                  <td>0.18 (0.03)</td>
                  <td>57.37 (4.31)</td>
                  <td>5.73 (0.26)</td>
                  <td>0.20 (0.01)</td>
                  <td>0.001 (0.00)</td>
                </tr>
                <tr valign="top">
                  <td>Liver fibrosis</td>
                  <td>56.67 (2.56)</td>
                  <td>857.07 (65.62)</td>
                  <td>7.99 (0.34)</td>
                  <td>0.93 (0.07)</td>
                  <td>1.18 (0.27)</td>
                  <td>0.18 (0.03)</td>
                  <td>64.65 (5.04)</td>
                  <td>6.21 (0.29)</td>
                  <td>0.19 (0.02)</td>
                  <td>0.001 (0.00)</td>
                </tr>
                <tr valign="top">
                  <td>Liver steatosis</td>
                  <td>39.10 (2.88)</td>
                  <td>366.24 (46.85)</td>
                  <td>10.66 (0.06)</td>
                  <td>0.97 (0.06)</td>
                  <td>1.64 (0.29)</td>
                  <td>0.19 (0.05)</td>
                  <td>43.62 (6.71)</td>
                  <td>4.81 (0.43)</td>
                  <td>0.22 (0.02)</td>
                  <td>0.001 (0.00)</td>
                </tr>
                <tr valign="top">
                  <td>Normal</td>
                  <td>27.44 (2.32)</td>
                  <td>423.96 (57.93)</td>
                  <td>8.15 (0.46)</td>
                  <td>1.86 (0.15)</td>
                  <td>5.24 (0.97)</td>
                  <td>0.22 (0.03)</td>
                  <td>30.65 (5.22)</td>
                  <td>4.88 (0.77)</td>
                  <td>0.38 (0.03)</td>
                  <td>0.003 (0.00)</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
        <sec>
          <title>Distinguishing Liver Disease by ChatGPT-4–Based Ultrasound Image Findings</title>
          <p>The classification tool for liver ultrasound images exhibited strong diagnostic performance across 3 categories: normal liver, fibrosis, and steatosis. Achieving an overall accuracy of 77%, the tool demonstrated its potential in aiding radiological assessments. For normal liver conditions, the tool achieved a precision, recall, and <italic>F</italic><sub>1</sub>-score of 0.75, indicating reliable detection accuracy. In the case of fibrosis, the tool excelled with a perfect recall of 1.00, meaning it successfully identified all fibrosis cases, and an <italic>F</italic><sub>1</sub>-score of 0.86, with a precision of 0.75. This highlights its robustness in diagnosing fibrotic conditions without missing any positive cases. However, for steatosis, while the tool showed a high precision of 0.80, the recall was slightly lower at 0.67, leading to an <italic>F</italic><sub>1</sub>-score of 0.73. This indicates a strong ability to correctly identify steatosis when predicted, though there is room for improvement in sensitivity.</p>
          <p>The macroaveraged metrics (precision=0.77, recall=0.81, and <italic>F</italic><sub>1</sub>-score=0.78) and weighted averages (precision=0.77, recall=0.77, and <italic>F</italic><sub>1</sub>-score=0.76) further underscore the tool’s balanced performance across different liver conditions. These results suggest that while the tool is already valuable for distinguishing normal and abnormal liver conditions, further refinements could enhance its sensitivity, particularly for steatosis.</p>
        </sec>
      </sec>
      <sec>
        <title>Evaluation of IDL-Based Ultrasound Radiomics for Liver Disease Classification</title>
        <sec>
          <title>Identification of Key Textural Features</title>
          <p>The radiomics analysis of liver ultrasound images conducted using IDL has provided significant insights into the textural characteristics associated with various liver diseases. Through ANOVA analysis, 9 textural features were identified as significantly varying among groups with different liver conditions (<xref rid="figure5" ref-type="fig">Figure 5</xref>).</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>Interactive data language (IDL)–based radiomics analysis in liver ultrasound images: This figure presents the distribution of texture parameters extracted using IDL from liver ultrasound images, comparing 3 diagnostic groups: fibrosis (blue), normal liver (orange), and steatosis (green). (A) Plot displays first-order texture features, including echointensity, heterogeneity, kurtosis, and skewness. Fibrotic livers exhibit increased echogenicity and heterogeneity compared with both normal and steatotic livers, reflecting structural alterations associated with fibrosis. (B) Plot illustrates higher-order texture features, including ASM, entropy, GLCM mean, GLCM variance, and correlation. Fibrotic livers demonstrate higher GLCM mean and variance, indicating greater textural complexity, whereas normal liver tissue exhibits lower values for these parameters but higher ASM and correlation, suggesting a more homogeneous texture. These findings highlight the capability of IDL-based radiomics in quantifying microstructural liver alterations across different pathological states, reinforcing its potential as an advanced imaging biomarker for disease characterization. ASM: angular second moment; GLCM: gray-level co-occurrence matrix.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Predictive Performance of Features</title>
          <p>The predictive performance of these ultrasound imaging features varied, with accuracy ranging from 0.47 to 0.76, sensitivity from 0.33 to 0.73, and specificity from 0.43 to 0.70 (<xref ref-type="table" rid="table5">Table 5</xref>). The feature “echo-intensity” demonstrated the highest performance with an accuracy of 0.76, sensitivity of 0.73, and specificity of 0.53, indicating balanced performance. Similarly, “Heterogeneity” also showed an accuracy of 0.76, with a sensitivity of 0.68 and a specificity of 0.51. On the other hand, “Kurtosis” had lower accuracy at 0.47 and sensitivity at 0.38, but a higher specificity of 0.64, highlighting its strength in correctly identifying true negative cases. Integrating multiple textural features enhances diagnostic performance. By combining the features, the overall accuracy improved to 0.77, with a notable sensitivity of 0.89. Similarly, <italic>F</italic><sub>1</sub>-score performance varied across features, with echo intensity achieving the highest <italic>F</italic><sub>1</sub>-score (0.84), indicating its superior predictive power. Heterogeneity also performed well, with an <italic>F</italic><sub>1</sub>-score of 0.56. In contrast, kurtosis, ASM, entropy, and correlation had the lowest <italic>F</italic><sub>1</sub>-scores (ranging from 0.26 to 0.38), reflecting weaker predictive contributions. Notably, the combined feature approach achieved the highest <italic>F</italic><sub>1</sub>-score (0.81), emphasizing the advantage of integrating multiple features to enhance predictive accuracy.</p>
          <table-wrap position="float" id="table5">
            <label>Table 5</label>
            <caption>
              <p>Diagnostic accuracy and performance of radiomic features extracted using interactive data language software. This table displays the diagnostic accuracy and performance metrics for various textural features extracted from the liver ultrasound images using interactive data language software as part of liver texture analysis. These metrics provide insights into the effectiveness of each feature in distinguishing between different liver conditions, contributing to the overall assessment of liver disease.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="290"/>
              <col width="180"/>
              <col width="190"/>
              <col width="190"/>
              <col width="150"/>
              <thead>
                <tr valign="top">
                  <td>Feature</td>
                  <td>Accuracy</td>
                  <td>Sensitivity</td>
                  <td>Specificity</td>
                  <td><italic>F</italic><sub>1</sub>-score</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Echo-intensity</td>
                  <td>0.76</td>
                  <td>0.73</td>
                  <td>0.53</td>
                  <td>0.84</td>
                </tr>
                <tr valign="top">
                  <td>Heterogeneity</td>
                  <td>0.76</td>
                  <td>0.68</td>
                  <td>0.51</td>
                  <td>0.56</td>
                </tr>
                <tr valign="top">
                  <td>Kurtosis</td>
                  <td>0.47</td>
                  <td>0.38</td>
                  <td>0.64</td>
                  <td>0.38</td>
                </tr>
                <tr valign="top">
                  <td>Skewness</td>
                  <td>0.67</td>
                  <td>0.58</td>
                  <td>0.55</td>
                  <td>0.48</td>
                </tr>
                <tr valign="top">
                  <td>ASM<sup>a</sup></td>
                  <td>0.48</td>
                  <td>0.33</td>
                  <td>0.56</td>
                  <td>0.26</td>
                </tr>
                <tr valign="top">
                  <td>Entropy</td>
                  <td>0.48</td>
                  <td>0.33</td>
                  <td>0.43</td>
                  <td>0.26</td>
                </tr>
                <tr valign="top">
                  <td>GLCM<sup>b</sup>_mean</td>
                  <td>0.67</td>
                  <td>0.56</td>
                  <td>0.51</td>
                  <td>0.48</td>
                </tr>
                <tr valign="top">
                  <td>GLCM_variance</td>
                  <td>0.62</td>
                  <td>0.59</td>
                  <td>0.53</td>
                  <td>0.57</td>
                </tr>
                <tr valign="top">
                  <td>Correlation</td>
                  <td>0.48</td>
                  <td>0.33</td>
                  <td>0.7</td>
                  <td>0.26</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table5fn1">
                <p><sup>a</sup>ASM: angular second momentum.</p>
              </fn>
              <fn id="table5fn2">
                <p><sup>b</sup>GLCM: gray-level co-occurrence matrix.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
      </sec>
      <sec>
        <title>Comparison Between ChatGPT and IDL Features</title>
        <sec>
          <title>Correlation and Agreement Analysis Between Feature Sets</title>
          <p>To assess the relationship between ChatGPT-4-derived features and IDL-based features, we performed correlation and agreement analyses. Each feature set was consolidated into a single value using multiple regression, allowing for a direct, one-to-one comparison between the 2 methods. The multiple linear regression model using ordinary least squares was applied to combine all extracted radiomic features into a single predicted value per image, with the liver disease category as the dependent variable. This was done separately for ChatGPT-4 and IDL outputs to generate comparable summary values. The results showed a moderate positive correlation (<italic>r</italic>=0.64) across all extracted features, which was statistically significant (<italic>P</italic>&lt;.001; <xref rid="figure6" ref-type="fig">Figure 6</xref>). In other words, increases in ChatGPT-4 feature values tended to coincide with increases in IDL feature values, albeit with some variability. While this correlation is not perfect, it demonstrates that ChatGPT-4–derived features are reasonably well aligned with IDL features, supporting the feasibility of ChatGPT-4 for ultrasound radiomics analysis.</p>
          <fig id="figure6" position="float">
            <label>Figure 6</label>
            <caption>
              <p>Correlation between ChatGPT Features and IDL Features. The scatter plot illustrates the relationship between ChatGPT features and IDL features, with a Pearson correlation coefficient (r) of 0.64 and a significant <italic>P</italic> value (<italic>P</italic>&lt;.001). Each blue circle represents an individual data point, while the solid black line shows the fitted linear regression model. The shaded region surrounding the regression line represents the 95% CI. The moderate positive correlation suggests that as ChatGPT features increase, IDL features tend to increase as well, indicating a consistent, albeit not perfect, relationship between the 2 feature sets. IDL: interactive data language.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>We further examined the correlation between the 2 software packages by focusing on 7 common features extracted by ChatGPT-4 and IDL showing a correlation (<italic>r</italic>) of 0.68 (<xref rid="figure7" ref-type="fig">Figure 7</xref>). The degree of correlation varied among the individual features, with the strongest correlation observed for combined features (<italic>r</italic>=0.68, <italic>P</italic>&lt;.001). Notably, first-order histogram measures such as echo-intensity (<italic>r</italic>=0.60) and kurtosis (<italic>r</italic>=0.52) showed stronger correlations, whereas GLCM-based features exhibited weaker alignment. In particular, GLCM-derived measures such as correlation and entropy demonstrated lower correlations.</p>
          <fig id="figure7" position="float">
            <label>Figure 7</label>
            <caption>
              <p>This figure presents scatter plots illustrating the correlation between radiomic features extracted using ChatGPT-4 and the corresponding common features derived from the reference software, IDL. Each subplot represents a specific feature, with ChatGPT-4 values on the x-axis and IDL values on the y-axis. Linear regression lines with shaded 95% CIs are shown to illustrate the strength and direction of the associations. Pearson correlation coefficients (r) and <italic>P</italic> values (P) are reported for each feature. Strong correlations were observed for echo intensity (r=0.66, <italic>P</italic>&lt;.001) and skewness (r=0.50, <italic>P</italic>&lt;.001), while entropy (r=0.18, <italic>P</italic>=.13), correlation (r=–0.12, <italic>P</italic>=.33), and ASM (r=–0.10, <italic>P</italic>=.41) showed weaker or nonsignificant associations. The final plot displays a combined score derived from the 7 shared features, generated using multiple regression. This aggregated output demonstrated a moderate correlation (r=0.68) between ChatGPT-4 and IDL, supporting overall agreement across platforms. These findings highlight both the variability in feature-level agreement and the potential value of composite feature models in radiomics analysis. ASM: angular second momentum; IDL: interactive data language.</p>
            </caption>
            <graphic xlink:href="ai_v4i1e68144_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>To further assess agreement between features extracted by the 2 software packages, a Bland-Altman analysis was performed (Figure S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The results demonstrated that combined features exhibited the best agreement, with narrow agreement limits and minimal bias, reinforcing their robustness. Skewness showed particularly strong agreement, indicating interchangeability between ChatGPT-4 and IDL for this feature. Minimal proportional bias was observed for well-correlated features, supporting the feasibility of using ChatGPT-4 for radiomics analysis in this context. Based on these results, the agreement between ChatGPT-4 and IDL-derived features can be categorized into three levels: (1) strong agreement (reliable and interchangeable): skewness and correlation; (2) moderate agreement (requires minor adjustments): ASM, entropy, and echo-intensity; and (3) weak agreement (fundamental differences requiring major corrections): kurtosis and heterogeneity.</p>
        </sec>
        <sec>
          <title>Processing Time Comparison</title>
          <p>In addition to feature correlation, we also compared batch processing efficiency between ChatGPT-4 and IDL-based tools. The results demonstrated that ChatGPT-4 outperformed IDL in processing speed. ChatGPT-4 completed the entire analysis process—including ROI selection, refinement, and texture analysis—in 4 minutes and 12 seconds for a batch of 10 images (the maximum batch size). In contrast, IDL required approximately 50 seconds per case, totaling over 8 minutes for the same batch. ChatGPT-4, therefore, showed more than a 40% reduction in processing time, highlighting its efficiency in automated batch processing. These findings suggest that ChatGPT-4 provides a viable alternative for high-throughput ultrasound radiomics analysis, offering both speed and reasonable alignment with IDL-based feature extraction.</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Expanding ChatGPT-4’s Role in Radiology</title>
        <p>AI and natural language processing tools, such as ChatGPT, have been increasingly explored for their role in enhancing radiology workflows [<xref ref-type="bibr" rid="ref39">39</xref>]. Recent studies demonstrated how ChatGPT can be integrated into radiology workflows to improve efficiency in patient registration, scheduling, image acquisition, interpretation, and reporting [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. The findings of these studies highlight ChatGPT’s potential to streamline repetitive tasks, reduce radiologist workload, and enhance communication in diagnostic imaging. Our study builds upon this foundation by extending the role of ChatGPT-4 beyond workflow optimization into advanced radiomics analysis. Specifically, we evaluate ChatGPT-4’s ability to extract quantitative ultrasound texture features, distinguish between different liver disease states, and compare its performance against conventional radiomics software. By bridging workflow optimization with diagnostic analysis, our findings contribute to the ongoing evolution of AI-assisted radiology, reinforcing ChatGPT’s potential as a tool for both administrative and analytical applications in medical imaging.</p>
      </sec>
      <sec>
        <title>ChatGPT-4’s Diagnostic Performance and Reproducibility</title>
        <p>Our study results show that ChatGPT-4’s radiomic analysis exhibited robust performance in distinguishing among the 3 liver pathology groups, achieving a sensitivity of 0.83 and AUC exceeding 0.75, when all radiomic features were combined. While the diagnostic utility of individual features varied, the aggregated analysis compensated for weaker predictors, thereby enhancing overall classification accuracy. Moreover, the high ICC values observed between independent observers (reaching 0.92) suggest excellent reproducibility, reinforcing the robustness of ChatGPT-4–derived texture parameters in ultrasound imaging. However, not all features demonstrated high reliability; for instance, ASM yielded an ICC of 0.25, indicating poor reproducibility. Such discrepancies in intraclass agreement for extracted ultrasound features can arise from small differences in ROI placement, even if they are close. Ultrasound images are highly sensitive to pixel-level changes, which can affect texture-based features. Additionally, interpolation effects, quantization errors, and software implementation variability can contribute to differences. These findings underscore the need for further refinement in feature extraction methodologies, particularly for features with lower reproducibility. Future research should prioritize the standardization of algorithms to enhance observer consistency, ensuring that AI-generated radiomic features are both reliable and clinically actionable.</p>
      </sec>
      <sec>
        <title>Interpretation of the Radiomic Biomarkers in Liver Disease</title>
        <p>The radiomic biomarkers identified in this study align with established pathophysiological changes in liver disease. Increased heterogeneity and entropy, for instance, reflect greater structural disorder where excessive collagen deposition disrupts tissue uniformity, consistent with fibrosis [<xref ref-type="bibr" rid="ref42">42</xref>,<xref ref-type="bibr" rid="ref43">43</xref>]. GLCM-based texture features provide additional microstructural insights, for example, ASM (or energy) serving as an index of texture uniformity—higher values indicate preserved architecture, while lower values suggest structural disruption, such as that seen in fibrosis [<xref ref-type="bibr" rid="ref44">44</xref>]. These features may serve as robust, noninvasive biomarkers for disease detection and monitoring. Our results showed that distinct textural patterns can be related to different liver conditions. Fibrosis presents with increased echogenicity, heterogeneity, and contrast, indicating architectural disruption. While steatosis also exhibits high echogenicity, it is associated with a smoother, more homogeneous texture, suggesting uniform yet structurally altered tissue. In contrast, normal liver maintains the most uniform texture, with high homogeneity and low contrast, reflecting preserved tissue organization.</p>
      </sec>
      <sec>
        <title>Comparison of ChatGPT-4 With Traditional Image Analysis Software</title>
        <p>Direct comparison between features extracted by ChatGPT-4 and IDL revealed a moderate correlation (<italic>r</italic>=0.68), when features are combined with notable variations between specific features on an individual basis. First-order features, which primarily assess pixel intensity distributions, exhibited strong agreement between the 2 platforms, whereas GLCM-based features showed greater discrepancies. This discrepancy is likely attributable to differences in pixel adjacency definitions, quantization methods, and sampling protocols across the 2 analytical frameworks. These results highlight a persistent challenge in radiomics: reproducibility across different software implementations. Variability in image acquisition parameters, preprocessing steps, and computational feature extraction methodologies can significantly impact radiomic feature consistency. Prior studies have underscored the necessity of harmonized radiomic pipelines to enhance cross-platform reproducibility [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. Establishing standardized radiomic workflows will be critical for ensuring the clinical applicability of AI-driven ultrasound analysis.</p>
        <p>A key advantage of ChatGPT-4 in this study was its ability to process multiple images in parallel, demonstrating significant efficiency gains over conventional software. Notably, processing time was reduced by more than 40% compared with IDL, suggesting that AI-driven tools can significantly enhance radiological workflow efficiency. This capability is particularly valuable in research settings requiring high-throughput image analysis, as well as in clinical environments where real-time assessment is essential for guiding interventional procedures. Moreover, ChatGPT-4’s scalability supports its application in large-scale imaging studies, enabling rapid dataset processing while minimizing manual input. This efficiency could facilitate applications in population-based screening programs, multicenter trials, and AI-assisted educational platforms. While compute capacity was controlled for in this study, we acknowledge that hardware variability can influence software performance. Future work should evaluate AI efficiency across diverse computing environments to better account for system-dependent constraints. Despite its slightly lower diagnostic performance compared with IDL, the results are encouraging given that ChatGPT-4 was not originally designed for medical image analysis. With additional domain-specific training and fine-tuning using large-scale ultrasound datasets, its performance is expected to improve. Future research should explore ChatGPT-4’s integration into routine radiology workflows, particularly in triage settings, where automated interpretation of liver ultrasound images could expedite clinical decision-making and optimize resource allocation.</p>
      </sec>
      <sec>
        <title>Limitations and Challenges</title>
        <sec>
          <title>Session Variability and Model Robustness</title>
          <p>Despite its promising performance, ChatGPT-4 exhibited session-dependent variability in feature extraction. This phenomenon, which possibly arises from differences in how the model processes context and maintains internal states across separate analyses, introduces potential inconsistencies in feature reproducibility. While batch analyses remained stable, independent session resets occasionally yielded variations in extracted parameters. Session-dependent variability is a recognized limitation of large language models [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>] and warrants further investigation in the context of medical imaging. To mitigate this challenge, we refined our prompting strategies, ensuring that feature extraction parameters were explicitly aligned across sessions. While steps were taken to standardize ChatGPT-4 prompts and maintain session continuity, variability in output due to the model’s inherent stochastic nature remains a limitation. Although incomplete feature sets were addressed through repeated prompting and prompt refinement, future studies may also benefit from averaging outputs across multiple runs or sessions to account for variability and enhance consistency. Additionally, future research should prioritize the development of standardized initialization protocols and structured prompt engineering strategies to improve the reproducibility of AI-driven radiomic analyses.</p>
        </sec>
        <sec>
          <title>Automated ROI Selection</title>
          <p>A key limitation of ChatGPT-4 is its fully automated ROI selection, which lacks the flexibility and precision needed for clinical applications. This may affect diagnostic accuracy, especially when critical pathological features fall outside the AI-defined ROI. While ChatGPT-4 does not allow direct manual ROI adjustments, we used a hybrid approach [<xref ref-type="bibr" rid="ref50">50</xref>], iteratively refining prompts to guide the model until the desired ROI was accurately identified. This method combined AI-driven automation with user oversight, improving ROI placement and reducing errors. Future iterations of ChatGPT-4 could enhance clinical applicability by incorporating interactive manual ROI modifications [<xref ref-type="bibr" rid="ref51">51</xref>]. Additionally, integrating advanced machine learning algorithms could refine automated ROI selection, allowing AI to prioritize clinically relevant areas [<xref ref-type="bibr" rid="ref52">52</xref>]. A promising direction is the development of hybrid models that preselect an ROI while allowing clinician refinement, balancing automation with expert oversight [<xref ref-type="bibr" rid="ref53">53</xref>].</p>
        </sec>
        <sec>
          <title>Preclinical Model and Clinical Translatability</title>
          <p>Our preclinical liver disease model closely mirrors human pathology, with histological findings aligning well with clinical presentations of fibrosis and steatosis. This translatability strengthens the relevance of our results; the model has undergone extensive validation to ensure robustness and suitability for studying liver disease [<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref32">32</xref>]. Nonetheless, this study serves as an initial assessment of ChatGPT-4 in a preclinical setting. Future work will extend to human liver ultrasound datasets, potentially involving diverse populations and multiple medical centers to enhance generalizability. Importantly, moving to clinical datasets raises privacy and ethical concerns, requiring strict compliance with HIPAA (Health Insurance Portability and Accountability Act), GDPR (General Data Protection Regulation), and other data security frameworks. Additionally, AI bias—stemming from skewed or nonrepresentative training data—remains a critical challenge, necessitating multicenter validation to ensure fairness and accuracy across varied clinical settings.</p>
        </sec>
        <sec>
          <title>Uncertainty in AI-Generated Diagnoses</title>
          <p>Reliable AI outputs are critical in medical imaging. Currently, ChatGPT-4 lacks inherent uncertainty quantification. Integrating probabilistic methods could improve reliability by assigning confidence levels based on prior data distributions, similar to deep learning–based radiomics [<xref ref-type="bibr" rid="ref54">54</xref>]. Monte Carlo dropout modeling could provide uncertainty intervals, flagging cases needing further review [<xref ref-type="bibr" rid="ref55">55</xref>]. Ensemble modeling could further enhance reliability through consensus-based confidence scores [<xref ref-type="bibr" rid="ref56">56</xref>]. Explainability improvements, such as structured reasoning frameworks, would support informed decision-making [<xref ref-type="bibr" rid="ref57">57</xref>]. Implementing these methods would ensure ChatGPT-4 functions as a decision-support tool rather than an autonomous diagnostic system.</p>
        </sec>
      </sec>
      <sec>
        <title>Clinical Applications and Future Directions</title>
        <p>This study highlights ChatGPT-4’s potential in medical imaging, particularly in image interpretation. While CNNs have achieved over 90% accuracy in tasks like liver fibrosis staging [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref58">58</xref>], ChatGPT-4 offers distinct advantages by integrating image analysis with narrative generation and enabling interactive ROI refinement [<xref ref-type="bibr" rid="ref19">19</xref>]. Unlike traditional deep learning models requiring extensive training, ChatGPT-4’s adaptability supports multimodal integration, making it a promising tool for clinical applications. Fine-tuning on specialized datasets could enhance its diagnostic accuracy, bridging the gap between specialized AI models and broader usability. Enhancing ChatGPT-4’s clinical utility involves several advancements. Transfer learning can improve domain-specific accuracy by incorporating structured radiology reports and labeled diagnostic cases [<xref ref-type="bibr" rid="ref59">59</xref>]. Multimodal training could allow it to analyze medical images alongside textual and radiomic data, improving correlation with clinical insights [<xref ref-type="bibr" rid="ref60">60</xref>]. Real-time clinical decision support through interactive learning could refine outputs, while integrating longitudinal patient data could enhance disease monitoring, particularly for chronic conditions [<xref ref-type="bibr" rid="ref61">61</xref>]. Future research should compare ChatGPT-4 with leading deep learning models to evaluate its role in multimodal medical imaging.</p>
        <p>Integrating ChatGPT-4 into clinical workflows has the potential to enhance diagnostic efficiency by streamlining triage, anomaly detection, and preliminary report generation. AI-driven tools have demonstrated the ability to expedite time-to-diagnosis by prioritizing critical imaging findings [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. In liver ultrasound, ChatGPT-4 could assist by distinguishing normal scans or minor abnormalities, allowing radiologists to focus on complex cases. Its radiomic analysis capabilities may facilitate early disease detection, akin to AI models that have identified microvascular changes in brain imaging and tumor margins in mammography [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. Automated report generation is another promising application, as AI-generated reports have been shown to match human interpretation in accuracy [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. Additionally, real-time feedback during ultrasound-guided procedures and batch-processing for large-scale imaging analysis could support multicenter studies and population-based disease screening [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. Despite its potential, key challenges include validation across diverse populations, regulatory approval, and clinician training. Prospective studies are needed to assess ChatGPT-4’s impact on diagnostic accuracy, workflow efficiency, and patient outcomes. Addressing these challenges could establish ChatGPT-4 as a transformative tool in radiology, optimizing early disease detection and clinical workflows.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>In conclusion, our study confirms the feasibility of using ChatGPT-4 for liver disease diagnosis through ultrasound image analysis, emphasizing its potential to assist radiologists in making more accurate diagnoses. Despite some limitations, ChatGPT-4’s ability to efficiently handle large-scale image datasets and its robust feature extraction capabilities make it a valuable tool for enhancing diagnostic accuracy and supporting radiological decision-making. By integrating ChatGPT-4 into radiological workflows, radiologists can leverage its capabilities to improve the precision and efficiency of liver ultrasound image analysis. This tool’s potential to manage vast amounts of data with high efficiency is particularly appealing in modern medical research and clinical practice.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Additional figures and tables.</p>
        <media xlink:href="ai_v4i1e68144_app1.docx" xlink:title="DOCX File, 685 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">ASM</term>
          <def>
            <p>angular second momentum</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">AUC</term>
          <def>
            <p>area under the receiver operating characteristic curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">GDPR</term>
          <def>
            <p>General Data Protection Regulation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">GLCM</term>
          <def>
            <p>gray-level co-occurrence matrix</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">HIPAA</term>
          <def>
            <p>Health Insurance Portability and Accountability Act</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ICC</term>
          <def>
            <p>intraclass correlation coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">IDL</term>
          <def>
            <p>Interactive Data Language</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">ROC</term>
          <def>
            <p>receiver operating characteristic</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">ROI</term>
          <def>
            <p>region of interest</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oren</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Gersh</surname>
              <given-names>BJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bhatt</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in medical imaging: switching from radiographic pathological data to clinically meaningful endpoints</article-title>
          <source>Lancet Digit Health</source>
          <year>2020</year>
          <volume>2</volume>
          <issue>9</issue>
          <fpage>e486</fpage>
          <lpage>e488</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S2589-7500(20)30160-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/S2589-7500(20)30160-6</pub-id>
          <pub-id pub-id-type="medline">33328116</pub-id>
          <pub-id pub-id-type="pii">S2589-7500(20)30160-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>HE</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>HH</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>BK</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Nam</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>EH</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Changes in cancer detection and false-positive recall in mammography using artificial intelligence: a retrospective, multireader study</article-title>
          <source>Lancet Digit Health</source>
          <year>2020</year>
          <volume>2</volume>
          <issue>3</issue>
          <fpage>e138</fpage>
          <lpage>e148</lpage>
          <pub-id pub-id-type="doi">10.1016/s2589-7500(20)30003-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van den Heuvel</surname>
              <given-names>TL</given-names>
            </name>
            <name name-style="western">
              <surname>van der Eerden</surname>
              <given-names>AW</given-names>
            </name>
            <name name-style="western">
              <surname>Manniesing</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ghafoorian</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Andriessen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Vande Vyvere</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>van den Hauwe</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ter Haar Romeny</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Goraj</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Platel</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Automated detection of cerebral microbleeds in patients with traumatic brain injury</article-title>
          <source>Neuroimage Clin</source>
          <year>2016</year>
          <volume>12</volume>
          <fpage>241</fpage>
          <lpage>251</lpage>
          <pub-id pub-id-type="doi">10.1016/j.nicl.2016.07.002</pub-id>
          <pub-id pub-id-type="medline">27489772</pub-id>
          <pub-id pub-id-type="pii">S2213-1582(16)30122-X</pub-id>
          <pub-id pub-id-type="pmcid">PMC4950582</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Becker</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Marcon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ghafoor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wurnig</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Frauenfelder</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Boss</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in mammography</article-title>
          <source>Invest Radiol</source>
          <year>2017</year>
          <volume>52</volume>
          <issue>7</issue>
          <fpage>434</fpage>
          <lpage>440</lpage>
          <pub-id pub-id-type="doi">10.1097/rli.0000000000000358</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Faes</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kale</surname>
              <given-names>AU</given-names>
            </name>
            <name name-style="western">
              <surname>Wagner</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bruynseels</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mahendiran</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Moraes</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Shamdas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kern</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ledsam</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Schmid</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Balaskas</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bachmann</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Keane</surname>
              <given-names>PA</given-names>
            </name>
            <name name-style="western">
              <surname>Denniston</surname>
              <given-names>AK</given-names>
            </name>
          </person-group>
          <article-title>A comparison of deep learning performance against health-care professionals in detecting diseases from medical imaging: a systematic review and meta-analysis</article-title>
          <source>Lancet Digit Health</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>6</issue>
          <fpage>e271</fpage>
          <lpage>e297</lpage>
          <pub-id pub-id-type="doi">10.1016/s2589-7500(19)30123-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bello</surname>
              <given-names>GA</given-names>
            </name>
            <name name-style="western">
              <surname>Dawes</surname>
              <given-names>TJW</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Biffi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>de Marvao</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>LSGE</given-names>
            </name>
            <name name-style="western">
              <surname>Gibbs</surname>
              <given-names>JSR</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkins</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Cook</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Rueckert</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>O'Regan</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Deep learning cardiac motion analysis for human survival prediction</article-title>
          <source>Nat Mach Intell</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>95</fpage>
          <lpage>104</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30801055"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s42256-019-0019-2</pub-id>
          <pub-id pub-id-type="medline">30801055</pub-id>
          <pub-id pub-id-type="pmcid">PMC6382062</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosny</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Parmar</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Quackenbush</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>LH</given-names>
            </name>
            <name name-style="western">
              <surname>Aerts</surname>
              <given-names>HJWL</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in radiology</article-title>
          <source>Nat Rev Cancer</source>
          <year>2018</year>
          <volume>18</volume>
          <issue>8</issue>
          <fpage>500</fpage>
          <lpage>510</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29777175"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41568-018-0016-5</pub-id>
          <pub-id pub-id-type="medline">29777175</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41568-018-0016-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC6268174</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Di Serafino</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Iacobellis</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Schillirò</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>D'auria</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Verde</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Grimaldi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Dell'Aversano Orabona</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Caruso</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sabatino</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Rinaldo</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Guerriero</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Cantisani</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Vallone</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Romano</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Common and uncommon errors in emergency ultrasound</article-title>
          <source>Diagnostics (Basel)</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>3</issue>
          <fpage>631</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=diagnostics12030631"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/diagnostics12030631</pub-id>
          <pub-id pub-id-type="medline">35328184</pub-id>
          <pub-id pub-id-type="pii">diagnostics12030631</pub-id>
          <pub-id pub-id-type="pmcid">PMC8947314</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pinto</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pinto</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Faggian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rubini</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Caranci</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Macarini</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Genovese</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Brunese</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Sources of error in emergency ultrasonography</article-title>
          <source>Crit Ultrasound J</source>
          <year>2013</year>
          <volume>5</volume>
          <issue>S1</issue>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1186/2036-7902-5-S1-S1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/2036-7902-5-s1-s1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Le</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Voigt</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nathanson</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Maw</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Dancel</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mathews</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Moreira</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sauthoff</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gelabert</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kurian</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Dumovich</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Proud</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Solis-McCarthy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Candotti</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dayton</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Arena</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Boesch</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Flores</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Foster</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Villalobos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ortiz-Jaimes</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Mader</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sisson</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Soni</surname>
              <given-names>NJ</given-names>
            </name>
          </person-group>
          <article-title>Comparison of four handheld point-of-care ultrasound devices by expert users</article-title>
          <source>Ultrasound J</source>
          <year>2022</year>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>27</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35796842"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13089-022-00274-6</pub-id>
          <pub-id pub-id-type="medline">35796842</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13089-022-00274-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC9263020</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Malik</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Rowland</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>BD</given-names>
            </name>
            <name name-style="western">
              <surname>Thom</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jackson</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Volk</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ehrman</surname>
              <given-names>RR</given-names>
            </name>
          </person-group>
          <article-title>The use of handheld ultrasound devices in emergency medicine</article-title>
          <source>Curr Emerg Hosp Med Rep</source>
          <year>2021</year>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>73</fpage>
          <lpage>81</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33996272"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s40138-021-00229-6</pub-id>
          <pub-id pub-id-type="medline">33996272</pub-id>
          <pub-id pub-id-type="pii">229</pub-id>
          <pub-id pub-id-type="pmcid">PMC8112245</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>YH</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in medical ultrasonography: driving on an unpaved road</article-title>
          <source>Ultrasonography</source>
          <year>2021</year>
          <volume>40</volume>
          <issue>3</issue>
          <fpage>313</fpage>
          <lpage>317</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34053212"/>
          </comment>
          <pub-id pub-id-type="doi">10.14366/usg.21031</pub-id>
          <pub-id pub-id-type="medline">34053212</pub-id>
          <pub-id pub-id-type="pii">usg.21031</pub-id>
          <pub-id pub-id-type="pmcid">PMC8217795</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Komatsu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sakai</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dozen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shozu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yasutomi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Machino</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Asada</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kaneko</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hamamoto</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Towards clinical application of artificial intelligence in ultrasound imaging</article-title>
          <source>Biomedicines</source>
          <year>2021</year>
          <volume>9</volume>
          <issue>7</issue>
          <fpage>720</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=biomedicines9070720"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/biomedicines9070720</pub-id>
          <pub-id pub-id-type="medline">34201827</pub-id>
          <pub-id pub-id-type="pii">biomedicines9070720</pub-id>
          <pub-id pub-id-type="pmcid">PMC8301304</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kayarian</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Schraft</surname>
              <given-names>EK</given-names>
            </name>
            <name name-style="western">
              <surname>Gottlieb</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and point-of-care ultrasound: benefits, limitations, and implications for the future</article-title>
          <source>Am J Emerg Med</source>
          <year>2024</year>
          <volume>80</volume>
          <fpage>119</fpage>
          <lpage>122</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ajem.2024.03.023</pub-id>
          <pub-id pub-id-type="medline">38555712</pub-id>
          <pub-id pub-id-type="pii">S0735-6757(24)00135-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dawkins</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in ultrasound imaging: where are we now?</article-title>
          <source>Ultrasound Q</source>
          <year>2024</year>
          <volume>40</volume>
          <issue>2</issue>
          <fpage>93</fpage>
          <lpage>97</lpage>
          <pub-id pub-id-type="doi">10.1097/RUQ.0000000000000680</pub-id>
          <pub-id pub-id-type="medline">38842384</pub-id>
          <pub-id pub-id-type="pii">00013644-202406000-00001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Recent advances in artificial intelligence-assisted ultrasound scanning</article-title>
          <source>Appl Sci</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>6</issue>
          <fpage>3693</fpage>
          <pub-id pub-id-type="doi">10.3390/app13063693</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dicle</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in diagnostic ultrasonography</article-title>
          <source>Diagn Interv Radiol</source>
          <year>2023</year>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>40</fpage>
          <lpage>45</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36959754"/>
          </comment>
          <pub-id pub-id-type="doi">10.4274/dir.2022.211260</pub-id>
          <pub-id pub-id-type="medline">36959754</pub-id>
          <pub-id pub-id-type="pmcid">PMC10679601</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Else</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Abstracts written by ChatGPT fool scientists</article-title>
          <source>Nature</source>
          <year>2023</year>
          <volume>613</volume>
          <issue>7944</issue>
          <fpage>423</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/d41586-023-00056-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/d41586-023-00056-7</pub-id>
          <pub-id pub-id-type="medline">36635510</pub-id>
          <pub-id pub-id-type="pii">10.1038/d41586-023-00056-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dwivedi</surname>
              <given-names>YK</given-names>
            </name>
            <name name-style="western">
              <surname>Kshetri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hughes</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Slade</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Jeyaraj</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kar</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Baabdullah</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Koohang</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Raghavan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ahuja</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Albanna</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Albashrawi</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Busaidi</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Balakrishnan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Barlette</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Basu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bose</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Brooks</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Buhalis</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chowdhury</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Crick</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Davies</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Davison</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Dé</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dennehy</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Duan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Dubey</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dwivedi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Flavián</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gauld</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Grover</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Janssen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Junglas</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Khorana</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kraus</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Larsen</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Latreille</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Laumer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Malik</surname>
              <given-names>FT</given-names>
            </name>
            <name name-style="western">
              <surname>Mardani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mariani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mithas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mogaji</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Nord</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>O'Connor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Okumus</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Pagani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pandey</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Papagiannidis</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pappas</surname>
              <given-names>IO</given-names>
            </name>
            <name name-style="western">
              <surname>Pathak</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Pries-Heje</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Raman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rana</surname>
              <given-names>NP</given-names>
            </name>
            <name name-style="western">
              <surname>Rehm</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ribeiro-Navarrete</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Richter</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rowe</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Sarker</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stahl</surname>
              <given-names>BC</given-names>
            </name>
            <name name-style="western">
              <surname>Tiwari</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>van der Aalst</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Venkatesh</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Viglia</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wade</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Walton</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wirtz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wright</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Opinion paper: “So what if ChatGPT wrote it?” multidisciplinary perspectives on opportunities, challenges and implications of generative conversational AI for research, practice and policy</article-title>
          <source>Int J Inf Manag</source>
          <year>2023</year>
          <volume>71</volume>
          <fpage>102642</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.ijinfomgt.2023.102642"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ijinfomgt.2023.102642</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kitamura</surname>
              <given-names>FC</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT is shaping the future of medical writing but still requires human judgment</article-title>
          <source>Radiology</source>
          <year>2023</year>
          <volume>307</volume>
          <issue>2</issue>
          <fpage>e230171</fpage>
          <pub-id pub-id-type="doi">10.1148/radiol.230171</pub-id>
          <pub-id pub-id-type="medline">36728749</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jeblick</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Arora</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT makes medicine easy to swallow: an exploratory case study on simplified radiology reports</article-title>
          <source>arXiv</source>
          <year>2023</year>
          <volume>34</volume>
          <issue>5</issue>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.48550/arXiv.2212.14882"/>
          </comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2212.14882</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alhasan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Tawfiq</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Aljamaan</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jamal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Eyadhy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Temsah</surname>
              <given-names>MH</given-names>
            </name>
          </person-group>
          <article-title>Mitigating the burden of severe pediatric respiratory viruses in the post-COVID-19 era: ChatGPT insights and recommendations</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <volume>15</volume>
          <issue>3</issue>
          <fpage>e36263</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37073200"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.36263</pub-id>
          <pub-id pub-id-type="medline">37073200</pub-id>
          <pub-id pub-id-type="pmcid">PMC10105647</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>SR</given-names>
            </name>
            <name name-style="western">
              <surname>Dobbs</surname>
              <given-names>TD</given-names>
            </name>
            <name name-style="western">
              <surname>Hutchings</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Whitaker</surname>
              <given-names>IS</given-names>
            </name>
          </person-group>
          <article-title>Using ChatGPT to write patient clinic letters</article-title>
          <source>Lancet Digit Health</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>4</issue>
          <fpage>e179</fpage>
          <lpage>e181</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/S2589-7500(23)00048-1"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/s2589-7500(23)00048-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Santandreu-Calonge</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ortiz-Martinez</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Agusti-Toro</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Can ChatGPT improve communication in hospitals?</article-title>
          <source>Profesional de la información</source>
          <year>2023</year>
          <volume>32</volume>
          <issue>2</issue>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3145/epi.2023.mar.19"/>
          </comment>
          <pub-id pub-id-type="doi">10.3145/epi.2023.mar.19</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Biswas</surname>
              <given-names>SS</given-names>
            </name>
          </person-group>
          <article-title>Role of ChatGPT in radiology with a focus on pediatric radiology: proof by examples</article-title>
          <source>Pediatr Radiol</source>
          <year>2023</year>
          <volume>53</volume>
          <issue>5</issue>
          <fpage>818</fpage>
          <lpage>822</lpage>
          <pub-id pub-id-type="doi">10.1007/s00247-023-05675-w</pub-id>
          <pub-id pub-id-type="medline">37106089</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00247-023-05675-w</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kamineni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lie</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Succi</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Evaluating ChatGPT as an adjunct for radiologic decision-making</article-title>
          <source>medRxiv</source>
          <year>2023</year>
          <fpage>2023</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1101/2023.02.02.23285399"/>
          </comment>
          <pub-id pub-id-type="doi">10.1101/2023.02.02.23285399</pub-id>
          <pub-id pub-id-type="medline">36798292</pub-id>
          <pub-id pub-id-type="pii">2023.02.02.23285399</pub-id>
          <pub-id pub-id-type="pmcid">PMC9934725</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wiggers</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>OpenAI releases GPT-4, a multimodal AI that it claims is state-of-the-art</article-title>
          <source>TechCrunch</source>
          <year>2023</year>
          <access-date>2023-03-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://techcrunch.com/2023/03/14/openai-releases-gpt-4-ai-that-it-claims-is-state-of-the-art/">https://techcrunch.com/2023/03/14/openai-releases-gpt-4-ai-that-it-claims-is-state-of-the-art/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bubeck</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Varun</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ronen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Johannes</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Eric</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ece</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Peter</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>TL</given-names>
            </name>
            <name name-style="western">
              <surname>Yuanzhi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Scott</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Harsha</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hamid</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Marco</surname>
              <given-names>TZ</given-names>
            </name>
          </person-group>
          <article-title>Sparks of artificial general intelligence: early experiments with GPT-4</article-title>
          <source>arXiv:2303.12712 [cs.CL]</source>
          <year>2023</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/369449949_Sparks_of_Artificial_General_Intelligence_Early_experiments_with_GPT-4"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sultan</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Mohamed</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Andronikou</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT-4: a breakthrough in ultrasound image analysis</article-title>
          <source>Radiol Adv</source>
          <year>2024</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>umae006</fpage>
          <pub-id pub-id-type="doi">10.1093/radadv/umae006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xing</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>An update on animal models of liver fibrosis</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2023</year>
          <volume>10</volume>
          <fpage>1160053</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37035335"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2023.1160053</pub-id>
          <pub-id pub-id-type="medline">37035335</pub-id>
          <pub-id pub-id-type="pmcid">PMC10076546</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>D'Souza</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Sultan</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Hunt</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Schultz</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Brice</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Wood</surname>
              <given-names>AKW</given-names>
            </name>
            <name name-style="western">
              <surname>Sehgal</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>B-mode ultrasound for the assessment of hepatic fibrosis: a quantitative multiparametric analysis for a radiomics approach</article-title>
          <source>Sci Rep</source>
          <year>2019</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>8708</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-019-45043-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-019-45043-z</pub-id>
          <pub-id pub-id-type="medline">31213661</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-019-45043-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC6581954</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sultan</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Cary</surname>
              <given-names>TW</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Hasani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Karmacharya</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Venkatesh</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Assenmacher</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Radaelli</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sehgal</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Can sequential images from the same object be used for training machine learning models? a case study for detecting liver disease by ultrasound radiomics</article-title>
          <source>AI (Basel)</source>
          <year>2022</year>
          <volume>3</volume>
          <issue>3</issue>
          <fpage>739</fpage>
          <lpage>750</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36168560"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/ai3030043</pub-id>
          <pub-id pub-id-type="medline">36168560</pub-id>
          <pub-id pub-id-type="pmcid">PMC9511699</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Al-Hasani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sultan</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Sagreiya</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cary</surname>
              <given-names>TW</given-names>
            </name>
            <name name-style="western">
              <surname>Karmacharya</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Sehgal</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Ultrasound radiomics for the detection of early-stage liver fibrosis</article-title>
          <source>Diagnostics (Basel)</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>11</issue>
          <fpage>2737</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=diagnostics12112737"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/diagnostics12112737</pub-id>
          <pub-id pub-id-type="medline">36359580</pub-id>
          <pub-id pub-id-type="pii">diagnostics12112737</pub-id>
          <pub-id pub-id-type="pmcid">PMC9689042</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>YY</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>KC</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Yeh</surname>
              <given-names>CK</given-names>
            </name>
          </person-group>
          <article-title>Multifeature analysis of an ultrasound quantitative diagnostic index for classifying nonalcoholic fatty liver disease</article-title>
          <source>Sci Rep</source>
          <year>2016</year>
          <volume>6</volume>
          <fpage>35083</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/srep35083"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/srep35083</pub-id>
          <pub-id pub-id-type="medline">27734972</pub-id>
          <pub-id pub-id-type="pii">srep35083</pub-id>
          <pub-id pub-id-type="pmcid">PMC5062088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Suganya</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rajaram</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Feature extraction and classification of ultrasound liver images using haralick texture-primitive features: application of SVM classifier</article-title>
          <year>2013</year>
          <conf-name>2013 International Conference on Recent Trends in Information Technology (ICRTIT)</conf-name>
          <conf-date>2013 July 25</conf-date>
          <conf-loc>Chennai, India</conf-loc>
          <fpage>596</fpage>
          <lpage>602</lpage>
          <pub-id pub-id-type="doi">10.1109/icrtit.2013.6844269</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hastie</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Tibshirani</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The elements of statistical learning</article-title>
          <source>Data Mining, Inference, and Prediction</source>
          <year>2009</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosmer</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Lemeshow</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sturdivant</surname>
              <given-names>RX</given-names>
            </name>
          </person-group>
          <source>Applied Logistic Regression</source>
          <year>2013</year>
          <publisher-loc>USA</publisher-loc>
          <publisher-name>John Wiley &amp; Sons</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sultan</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>YT</given-names>
            </name>
            <name name-style="western">
              <surname>Cary</surname>
              <given-names>TW</given-names>
            </name>
            <name name-style="western">
              <surname>Ashi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sehgal</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Quantitative pleural line characterization outperforms traditional lung texture ultrasound features in detection of COVID-19</article-title>
          <source>J Am Coll Emerg Physicians Open</source>
          <year>2021</year>
          <volume>2</volume>
          <issue>2</issue>
          <fpage>e12418</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/EMP212418"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/emp2.12418</pub-id>
          <pub-id pub-id-type="medline">33842925</pub-id>
          <pub-id pub-id-type="pii">EMP212418</pub-id>
          <pub-id pub-id-type="pmcid">PMC8018308</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mese</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Taslicay</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Sivrioglu</surname>
              <given-names>AK</given-names>
            </name>
          </person-group>
          <article-title>Improving radiology workflow using ChatGPT and artificial intelligence</article-title>
          <source>Clin Imaging</source>
          <year>2023</year>
          <volume>103</volume>
          <fpage>109993</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.clinimag.2023.109993"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.clinimag.2023.109993</pub-id>
          <pub-id pub-id-type="medline">37812965</pub-id>
          <pub-id pub-id-type="pii">S0899-7071(23)00213-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lyu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zapadka</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Ponnatapura</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Niu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Myers</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Whitlow</surname>
              <given-names>CT</given-names>
            </name>
          </person-group>
          <article-title>Translating radiology reports into plain language using ChatGPT and GPT-4 with prompt learning: results, limitations, and potential</article-title>
          <source>Vis Comput Ind Biomed Art</source>
          <year>2023</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>9</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37198498"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s42492-023-00136-5</pub-id>
          <pub-id pub-id-type="medline">37198498</pub-id>
          <pub-id pub-id-type="pii">10.1186/s42492-023-00136-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC10192466</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Elkassem</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>AD</given-names>
            </name>
          </person-group>
          <article-title>Potential use cases for ChatGPT in radiology reporting</article-title>
          <source>Am J Roentgenol</source>
          <year>2023</year>
          <volume>221</volume>
          <issue>3</issue>
          <fpage>373</fpage>
          <lpage>376</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.2214/AJR.23.29198"/>
          </comment>
          <pub-id pub-id-type="doi">10.2214/ajr.23.29198</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Byra</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Styczynski</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Szmigielski</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kalinowski</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Michałowski</surname>
              <given-names>Ł</given-names>
            </name>
            <name name-style="western">
              <surname>Paluszkiewicz</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ziarkiewicz-Wróblewska</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zieniewicz</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sobieraj</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Nowicki</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Transfer learning with deep convolutional neural network for liver steatosis assessment in ultrasound images</article-title>
          <source>Int J Comput Assist Radiol Surg</source>
          <year>2018</year>
          <month>12</month>
          <volume>13</volume>
          <issue>12</issue>
          <fpage>1895</fpage>
          <lpage>1903</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30094778"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11548-018-1843-2</pub-id>
          <pub-id pub-id-type="medline">30094778</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11548-018-1843-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC6223753</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Jeon</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Joo</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Quantitative evaluation of hepatic steatosis using advanced imaging techniques: focusing on new quantitative ultrasound techniques</article-title>
          <source>Korean J Radiol</source>
          <year>2022</year>
          <month>01</month>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>13</fpage>
          <lpage>29</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.kjronline.org/DOIx.php?id=10.3348/kjr.2021.0112"/>
          </comment>
          <pub-id pub-id-type="doi">10.3348/kjr.2021.0112</pub-id>
          <pub-id pub-id-type="medline">34983091</pub-id>
          <pub-id pub-id-type="pii">23.13</pub-id>
          <pub-id pub-id-type="pmcid">PMC8743150</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X-J</given-names>
            </name>
          </person-group>
          <article-title>Role of radiomics in staging liver fibrosis: a meta-analysis</article-title>
          <source>BMC Med Imaging</source>
          <year>2024</year>
          <month>04</month>
          <day>12</day>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>87</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedimaging.biomedcentral.com/articles/10.1186/s12880-024-01272-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12880-024-01272-x</pub-id>
          <pub-id pub-id-type="medline">38609843</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12880-024-01272-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC11010385</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Traverso</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wee</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Dekker</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gillies</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Repeatability and reproducibility of radiomic features: a systematic review</article-title>
          <source>Int J Radiat Oncol Biol Phys</source>
          <year>2018</year>
          <volume>102</volume>
          <issue>4</issue>
          <fpage>1143</fpage>
          <lpage>1158</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0360-3016(18)30905-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ijrobp.2018.05.053</pub-id>
          <pub-id pub-id-type="medline">30170872</pub-id>
          <pub-id pub-id-type="pii">S0360-3016(18)30905-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC6690209</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>HMT</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>HYC</given-names>
            </name>
            <name name-style="western">
              <surname>Varghese</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Donovan</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>South</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Saxby</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nisbet</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Prakash</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sasidharan</surname>
              <given-names>BK</given-names>
            </name>
            <name name-style="western">
              <surname>Pavamani</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Devadhas</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mathew</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Isiah</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Evans</surname>
              <given-names>PM</given-names>
            </name>
          </person-group>
          <article-title>Reproducibility in radiomics: a comparison of feature extraction methods and two independent datasets</article-title>
          <source>Appl Sci (Basel)</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>12</issue>
          <fpage>7291</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38725869"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/app13127291</pub-id>
          <pub-id pub-id-type="medline">38725869</pub-id>
          <pub-id pub-id-type="pii">s00701-024-05977-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC7615943</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radford</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Child</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Luan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Amodei</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Language models are unsupervised multitask learners</article-title>
          <source>OpenAI Blog</source>
          <year>2019</year>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.bibsonomy.org/bibtex/1b926ece39c03cdf5499f6540cf63babd"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>OpenAI</collab>
          </person-group>
          <article-title>GPT-4 technical report</article-title>
          <source>arXiv</source>
          <year>2023</year>
          <access-date>2025-05-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2303.08774">https://arxiv.org/abs/2303.08774</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Beckmann</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Ramnani</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Woolrich</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Bannister</surname>
              <given-names>PR</given-names>
            </name>
            <name name-style="western">
              <surname>Jenkinson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Matthews</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>McGonigle</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>Variability in fMRI: a re-examination of inter-session differences</article-title>
          <source>Hum Brain Mapp</source>
          <year>2005</year>
          <volume>24</volume>
          <issue>3</issue>
          <fpage>248</fpage>
          <lpage>257</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/15654698"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/hbm.20080</pub-id>
          <pub-id pub-id-type="medline">15654698</pub-id>
          <pub-id pub-id-type="pmcid">PMC6871748</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Albadawy</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Saha</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Harowicz</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Mazurowski</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Deep learning for identifying radiogenomic associations in breast cancer</article-title>
          <source>Comput Biol Med</source>
          <year>2019</year>
          <month>06</month>
          <volume>109</volume>
          <issue>3</issue>
          <fpage>85</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31048129"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2019.04.018</pub-id>
          <pub-id pub-id-type="medline">31048129</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(19)30126-X</pub-id>
          <pub-id pub-id-type="pmcid">PMC7155381</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mazurowski</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Buda</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Saha</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bashir</surname>
              <given-names>MR</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in radiology: an overview of the concepts and a survey of the state of the art with focus on MRI</article-title>
          <source>J Magn Reson Imaging</source>
          <year>2019</year>
          <volume>49</volume>
          <issue>4</issue>
          <fpage>939</fpage>
          <lpage>954</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30575178"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/jmri.26534</pub-id>
          <pub-id pub-id-type="medline">30575178</pub-id>
          <pub-id pub-id-type="pmcid">PMC6483404</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ardila</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kiraly</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Bharadwaj</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Reicher</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tse</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Etemadi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Naidich</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Shetty</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>End-to-end lung cancer screening with three-dimensional deep learning on low-dose chest computed tomography</article-title>
          <source>Nat Med</source>
          <year>2019</year>
          <volume>25</volume>
          <issue>6</issue>
          <fpage>954</fpage>
          <lpage>961</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-019-0447-x</pub-id>
          <pub-id pub-id-type="medline">31110349</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-019-0447-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McBee</surname>
              <given-names>MP</given-names>
            </name>
            <name name-style="western">
              <surname>Awan</surname>
              <given-names>OA</given-names>
            </name>
            <name name-style="western">
              <surname>Colucci</surname>
              <given-names>AT</given-names>
            </name>
            <name name-style="western">
              <surname>Ghobadi</surname>
              <given-names>CW</given-names>
            </name>
            <name name-style="western">
              <surname>Kadom</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kansagra</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Tridandapani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Auffermann</surname>
              <given-names>WF</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in radiology: a review of current applications and future directions</article-title>
          <source>Acad Radiol</source>
          <year>2018</year>
          <volume>25</volume>
          <issue>11</issue>
          <fpage>1472</fpage>
          <lpage>1480</lpage>
          <pub-id pub-id-type="doi">10.1016/j.acra.2018.02.018</pub-id>
          <pub-id pub-id-type="medline">29606338</pub-id>
          <pub-id pub-id-type="pii">S1076-6332(18)30104-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Uncertainty-aware AI in medical decision support: a review of bayesian and probabilistic approaches</article-title>
          <source>J Med AI</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>2</issue>
          <fpage>120</fpage>
          <lpage>135</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gal</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ghahramani</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Dropout as a bayesian approximation: representing model uncertainty in deep learning</article-title>
          <year>2016</year>
          <conf-name>Proceedings of the 33rd International Conference on Machine Learning (ICML)</conf-name>
          <conf-date>2016 June 19</conf-date>
          <conf-loc>New York, NY</conf-loc>
          <fpage>1050</fpage>
          <lpage>1059</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tiu</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Talius</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Langlotz</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>AY</given-names>
            </name>
            <name name-style="western">
              <surname>Rajpurkar</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Expert-level detection of pathologies from unannotated chest X-ray images via self-supervised learning</article-title>
          <source>Nat Biomed Eng</source>
          <year>2022</year>
          <month>12</month>
          <volume>6</volume>
          <issue>12</issue>
          <fpage>1399</fpage>
          <lpage>1406</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36109605"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41551-022-00936-9</pub-id>
          <pub-id pub-id-type="medline">36109605</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41551-022-00936-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC9792370</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Carrington</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Measuring the quality of explanations: the system causability scale (SCS): comparing human and machine explanations</article-title>
          <source>Kunstliche Intell (Oldenbourg)</source>
          <year>2020</year>
          <month>05</month>
          <day>31</day>
          <volume>34</volume>
          <issue>2</issue>
          <fpage>193</fpage>
          <lpage>198</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32549653"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s13218-020-00636-z</pub-id>
          <pub-id pub-id-type="medline">32549653</pub-id>
          <pub-id pub-id-type="pii">636</pub-id>
          <pub-id pub-id-type="pmcid">PMC7271052</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yue</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in imaging for liver disease diagnosis</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2025</year>
          <volume>12</volume>
          <issue>4</issue>
          <fpage>1591523</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fmed.2025.1591523"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2025.1591523</pub-id>
          <pub-id pub-id-type="medline">40351457</pub-id>
          <pub-id pub-id-type="pmcid">PMC12062035</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Idrees</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Transfer learning in natural language processing: a game-changer for AI models</article-title>
          <source>Medium</source>
          <year>2023</year>
          <access-date>2025-02-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medium.com/@hassaanidrees7/transfer-learning-in-natural-language-processing-nlp-a-game-changer-for-ai-models-b8739274bb02">https://tinyurl.com/36d7se5y</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rajpurkar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>AI in health and medicine</article-title>
          <source>Nat Med</source>
          <year>2022</year>
          <month>01</month>
          <day>20</day>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>31</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-021-01614-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Niu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Bai</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Modelling patient longitudinal data for clinical decision support: a case study on emerging AI healthcare technologies</article-title>
          <source>Inf Syst Front</source>
          <year>2024</year>
          <month>07</month>
          <day>18</day>
          <volume>27</volume>
          <issue>2</issue>
          <fpage>409</fpage>
          <lpage>427</lpage>
          <pub-id pub-id-type="doi">10.1007/s10796-024-10513-x</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
