<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR AI</journal-id><journal-id journal-id-type="publisher-id">ai</journal-id><journal-id journal-id-type="index">41</journal-id><journal-title>JMIR AI</journal-title><abbrev-journal-title>JMIR AI</abbrev-journal-title><issn pub-type="epub">2817-1705</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v5i1e74144</article-id><article-id pub-id-type="doi">10.2196/74144</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>A Qualitative Exploration of Ethical Aspects of Using AI in Parkinson Disease: Patient Panel Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Luckhaus</surname><given-names>Jamie Linnea</given-names></name><degrees>MPH</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Scott Duncan</surname><given-names>Therese</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Kharko</surname><given-names>Anna</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Clareborn</surname><given-names>Anna</given-names></name><degrees>MA</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib 
contrib-type="author"><name name-style="western"><surname>H&#x00E4;gglund</surname><given-names>Maria</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Blease</surname><given-names>Charlotte</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Riggare</surname><given-names>Sara</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff5">5</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Women's and Children's Health, Participatory eHealth and Health Data, Disciplinary Domain of Medicine and Pharmacy, Uppsala University</institution><addr-line>MTC-huset, Dag Hammarskj&#x00F6;lds v&#x00E4;g 14B, 1 tr</addr-line><addr-line>Uppsala</addr-line><country>Sweden</country></aff><aff id="aff2"><institution>Department of Immunology, Genetics and Pathology, Uppsala University</institution><addr-line>Uppsala</addr-line><country>Sweden</country></aff><aff id="aff3"><institution>Department of Medical Sciences, Uppsala University Hospital</institution><addr-line>Uppsala</addr-line><country>Sweden</country></aff><aff id="aff4"><institution>Department of Psychiatry, Beth Israel Deaconess Medical Center, Harvard University</institution><addr-line>Boston</addr-line><country>United States</country></aff><aff id="aff5"><institution>Center for Disability Research, Disciplinary Domain of Medicine and Pharmacy, Uppsala University Hospital</institution><addr-line>Uppsala</addr-line><country>Sweden</country></aff><contrib-group><contrib contrib-type="editor"><name 
name-style="western"><surname>Blasimme</surname><given-names>Alessandro</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Lee</surname><given-names>Juhee</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Martins-Conde</surname><given-names>Patricia</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Robinson</surname><given-names>Peter</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Lee</surname><given-names>Won Bok</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Jamie Linnea Luckhaus, MPH, Department of Women's and Children's Health, Participatory eHealth and Health Data, Disciplinary Domain of Medicine and Pharmacy, Uppsala University, MTC-huset, Dag Hammarskj&#x00F6;lds v&#x00E4;g 14B, 1 tr, Uppsala, Sweden, 46 0769434111; <email>jamie.luckhaus@uu.se</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>28</day><month>4</month><year>2026</year></pub-date><volume>5</volume><elocation-id>e74144</elocation-id><history><date date-type="received"><day>20</day><month>03</month><year>2025</year></date><date date-type="rev-recd"><day>06</day><month>10</month><year>2025</year></date><date date-type="accepted"><day>26</day><month>03</month><year>2026</year></date></history><copyright-statement>&#x00A9; Jamie Linnea Luckhaus, Therese Scott Duncan, Anna Kharko, Anna Clareborn, Maria H&#x00E4;gglund, Charlotte Blease, Sara Riggare. Originally published in JMIR AI (<ext-link ext-link-type="uri" xlink:href="https://ai.jmir.org">https://ai.jmir.org</ext-link>), 28.4.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR AI, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.ai.jmir.org/">https://www.ai.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://ai.jmir.org/2026/1/e74144"/><abstract><sec><title>Background</title><p>As Parkinson disease (PD) rates increase, so does interest in finding new technological solutions for PD management. Despite substantial efforts to explore potential applications of artificial intelligence (AI) in PD management, research from the perspectives of people with PD on AI remains limited.</p></sec><sec><title>Objective</title><p>This study aims to explore the ethical considerations of AI in PD management from the perspective of people with PD.</p></sec><sec sec-type="methods"><title>Methods</title><p>A qualitative triangulation of 13 interviews and 2 focus groups (FGs) with a panel of expert-by-experience people with PD from 6 European countries was carried out using abductive thematic analysis. The 6 biomedical ethical principles conceptualized by Beauchamp and Childress guided the analysis. Participants varied in diagnosis, disease experiences, and technological backgrounds. 
A researcher with PD was involved from start to finish, providing valuable insights into data collection and analysis.</p></sec><sec sec-type="results"><title>Results</title><p>Although participants were optimistic that AI could enhance autonomy and beneficence through personalized, actionable insights for people with PD and their health care professionals, concerns arose over patient involvement, model accuracy and privacy, ethical injustices, and the psychological impact. Risk prediction, prognosis, and medication response were viewed differently in terms of potential value and ethical considerations, with risk prediction being perceived as the most ethically complex. To uphold autonomy, it was considered important for AI insights to be patient-accessible, and sensitive insights should be communicated by a health care professional who recognizes individual differences in desiring and responding to AI predictions.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>While people with PD felt AI could personalize (self-)care and increase autonomy, concerns about psychological harm and widening inequalities highlight the importance of ethical safeguards. Our findings underscore the importance of AI integrations that prioritize individual needs, actively engage people with PD in the development, implementation, and interpretation of predictive AI, and establish guidelines to support health care professionals and minimize patient harm. 
Different forms of implementation and precautions should be taken for risk, progression, and medication response prediction.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>AI</kwd><kwd>co-design</kwd><kwd>medical ethics</kwd><kwd>biomedical ethical principles</kwd><kwd>Parkinson disease</kwd><kwd>predictive medicine</kwd><kwd>precision medicine</kwd><kwd>user perceptions</kwd><kwd>qualitative study</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Artificial intelligence (AI) and its promise of delivering individually tailored health care is gaining traction, especially for chronic conditions such as Parkinson disease (PD), the fastest growing neurodegenerative condition [<xref ref-type="bibr" rid="ref1">1</xref>]. There are numerous challenges in diagnosing and managing PD, including overlapping symptoms with other diseases, the presentation of nonmotor prodromal symptoms years before cardinal symptoms, and variability in disease presentation and medication response across individuals and over the course of the disease [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>Many proof-of-concept studies on PD monitoring, clinical decision-making, and diagnostic technologies through analyzing digital biomarkers have been published recently. These include AI to detect PD and predict progression [<xref ref-type="bibr" rid="ref4">4</xref>], assess motor states over the course of a levodopa dose [<xref ref-type="bibr" rid="ref5">5</xref>], and classify ON-OFF fluctuations [<xref ref-type="bibr" rid="ref6">6</xref>]. 
The idea behind applying predictive AI, which forecasts future events or outcomes based on historical trends, is ultimately to provide more timely and individually tailored treatments than a clinician could do on their own.</p><p>Rapid eye movement sleep behavior disorder (RBD) is a possible prodromal marker of neurodegenerative diseases [<xref ref-type="bibr" rid="ref7">7</xref>], making this patient group a target for PD risk monitoring, with hopes of early neuroprotective interventions as well as learning more about prodromal PD. However, with PD being progressive with no cure, predicting risk comes with ethical considerations such as psychological distress and stigmatization, especially given the high uncertainty of current model calculations [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Although neuroprotective therapies to slow progression are under development [<xref ref-type="bibr" rid="ref10">10</xref>] and modifiable lifestyle factors such as exercise have also been suggested to play an important role in symptom management [<xref ref-type="bibr" rid="ref11">11</xref>], a PD diagnosis would, for most, constitute life-altering news. A study on whether and how to disclose PD risk to patients with RBD suggests that this depends on patient subtype and must respect individual needs and wishes [<xref ref-type="bibr" rid="ref9">9</xref>]. Literature on the development of technologies for PD highlights the importance of involving people with PD in the design process to ensure alignment with real-world problems [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. 
Leveraging experiential knowledge through a co-design approach helps capture the psychological, social, and medical factors of the user experience, resulting in sustained adoption [<xref ref-type="bibr" rid="ref13">13</xref>].</p><p>The limited research on AI-assisted monitoring from the perspective of people with PD has found multilevel ethical concerns regarding AI prognostics: on personal, interpersonal, professional or institutional, and societal levels [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. As AI use for PD increases, it is essential to explore the ethical dimensions from the users&#x2019; perspective, including perceptions of benefits, potential harms, privacy risks, and broader concerns. A framework that can help map ethical considerations is the bioethics framework [<xref ref-type="bibr" rid="ref15">15</xref>] (<xref ref-type="fig" rid="figure1">Figure 1</xref>), which has previously been used to contextualize and evaluate the impact of digital interventions on patient autonomy, beneficence, nonmaleficence, justice, truthfulness, and confidentiality [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. These considerations are crucial for the development and implementation of such tools, to increase value and avoid harm for potential users. 
We, therefore, aim to explore the ethical considerations of using AI in PD management from the perspective of people with PD.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>The 6 bioethical principles and their definitions (adapted from Beauchamp and Childress [<xref ref-type="bibr" rid="ref15">15</xref>]).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e74144_fig01.png"/></fig></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Design and Setting</title><p>This study presents findings from qualitative data collected within the European Union (EU)&#x2013;funded project: &#x201C;AI-PROGNOSIS: Artificial intelligence&#x2013;based Parkinson&#x2019;s disease risk assessment and prognosis&#x201D; (Nr.101080581). To provide a perspective of people with PD throughout the project, an international patient panel was assembled in late 2023, which served as the participant group. The panel was recruited using a combination of convenience, purposive, and snowball sampling to reach multiple English-speaking people with PD from each of the 6 countries with an appropriate background. People with PD were selected based on their high PD knowledge levels, high levels of engagement in PD communities, and interest in technology, while at the same time maintaining diversity in age, gender, country, time since diagnosis, and educational or professional background. A few participants had prior (professional) knowledge of AI. 
An hour-long information meeting was held online for those interested prior to officially inviting participants, and those who missed were sent the recording.</p><p>We adopted participatory and co-design methodologies, which are important in the development of digital health tools, ensuring that the technology is useful and user-centered [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. A researcher with PD was actively involved in all phases of the study: planning and design, data collection and analysis, and writing.</p><p>The 3 models under development within AI-PROGNOSIS, which were discussed with the panel, were (1) a risk score, predicting one&#x2019;s risk of developing PD among individuals clinically identified as at risk; (2) a prognosis, predicting the progression of people with PD; and (3) medication response, predicting the response of people with PD to various medications and timings or doses.</p></sec><sec id="s2-2"><title>Data Collection</title><p>The participants (patient panel) consisted of 14 people with PD (5 women and 9 men) from 6 countries: France (n=2), Germany (n=3), Portugal (n=3), Spain (n=1), Sweden (n=2), and the United Kingdom (n=3). The mean age at the time of data collection was 53.5 (SD 11.4; range 40&#x2010;75) years. The mean age at the time of PD diagnosis was 44.5 (SD 11.8; range 32&#x2010;64) years, and the mean number of years living with PD was 9 (SD 5; range 1&#x2010;17). All were of Caucasian ethnicity.</p><p>Semistructured interviews (January-March 2024) at an average of 36 minutes (range 13&#x2010;57 min) were conducted by JLL with 13 out of 14 panel members. One panel member chose not to be interviewed but participated in an FG. 
The interview guide (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) covered four topics: (1) personal and PD background; (2) prior research participation (if any); (3) expectations, concerns, and hopes regarding this project; and (4) views on AI in health care or research in general and in AI-PROGNOSIS. No introduction to AI was provided prior to the interviews, in order to capture initial thoughts and expectations, as these were the first interviews of the project.</p><p>An introduction to the project&#x2019;s AI tools and key development terms (eg, high-level features, user-needs, explainable insights) was given before each of the 2 FGs, however, to facilitate deeper and well-informed discussion (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>). The FGs were attended by 9 of 14 panel members and were facilitated by SR and AC in February 2024. The FGs were held on 2 occasions to increase participation while keeping the size small enough for meaningful conversation, and participants signed up based on their schedule. Only the 2 most popular times were selected, meaning not all participated. Additionally, the 2 French participants joined the project after the FGs due to English language recruitment challenges. The FG objective was to elicit user needs for the project&#x2019;s development; the FGs were held ahead of prototypes, so participants spoke hypothetically. For the sake of the discussion, participants were asked to respond with the assumption that the AI models would work perfectly. The content was similar between the two (the guide is provided in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>), although the facilitators gave the second FG more time on questions unanswered by the first to ensure data on all topics of interest. 
FG 1 (1 hour 48 min) consisted of 2 female and 2 male panel members from 3 different countries, and FG 2 (1 hour 43 min) consisted of different participants: 3 female and 2 male panel members from 4 countries. The participants were divided with availability and diversity in mind. Each FG consisted of a primary and secondary facilitator, as well as 1 additional project member, who facilitated note-taking. All data were collected over Zoom in English, with occasional clarifications as necessary in participants&#x2019; native language to maximize participation and understanding.</p></sec><sec id="s2-3"><title>Data Analysis</title><p>The interviews and FGs were analyzed abductively as 1 dataset, using a simplified version of Thompson&#x2019;s 8-step abductive thematic analysis (<xref ref-type="fig" rid="figure2">Figure 2</xref>) [<xref ref-type="bibr" rid="ref20">20</xref>]. This method of abductive reasoning, shown in <xref ref-type="fig" rid="figure3">Figure 3</xref>, involves iteratively moving between empirical data and theoretical framework to refine interpretations [<xref ref-type="bibr" rid="ref21">21</xref>]. JLL familiarized herself with the data by transcribing the interviews and FGs and reading the transcripts. She then conducted inductive coding in Taguette, using &#x201C;considerations for AI in managing PD&#x201D; as an initial analytic lens. Codes were low-level descriptors of salient statements emerging from the text. The codes were then transferred into Miro, a collaborative online &#x201C;whiteboard,&#x201D; where codes were inductively grouped into preliminary themes and discussed between authors. 
An overarching ethical dimension emerged, prompting the abductive step of refining the research focus toward ethical considerations and the application of biomedical ethical principles [<xref ref-type="bibr" rid="ref15">15</xref>] to deductively categorize the codes or preliminary themes (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>). One author (CB), an expert in biomedical ethics, provided critical input to ensure alignment between themes and principles. JLL and SR finalized the analysis in a reflexive process, revisiting the transcripts, discussing, and revising accordingly. The data were not constrained by the framework, and a section highlighting outliers is also included. Due to the elements of trust in both truthfulness and confidentiality, and the smaller themes within those two principles, we combined all 3 for the presentation of our findings.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Our 7-step analysis process. Red boxes indicate differences from the 8-step abductive thematic analysis by Thompson [<xref ref-type="bibr" rid="ref20">20</xref>]<italic>.</italic> AI: artificial intelligence; PD: Parkinson disease.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e74144_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Mapping of our abductive thematic analysis approach. AI: artificial intelligence; PD: Parkinson&#x2019;s disease.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="ai_v5i1e74144_fig03.png"/></fig></sec><sec id="s2-4"><title>Ethical Considerations</title><p>Participants were informed about the study aims and procedures, including data collection and processing, potential risks related to participation, and their rights under the General Data Protection Regulation (GDPR). 
The data were de&#x2011;identified prior to analysis and reporting. Direct identifiers (e.g., names) and potentially identifying information such as age were removed. When presenting quotations, only broad demographic characteristics were retained to minimize the risk of re&#x2011;identification. Access to the raw data was restricted to the research team, and all data were stored on secure, password&#x2011;protected systems in accordance with GDPR requirements. All participants provided informed consent prior to participation. Ethical approval was obtained from the Swedish Ethical Review Authority (reference number: 2023&#x2011;05949&#x2011;01). Participants received a modest gift card as compensation for their time.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>The results are presented according to the biomedical ethical principles: (1) autonomy; (2) beneficence; (3) nonmaleficence; (4) justice; and (5) trust, truthfulness, and confidentiality, followed by illustrative quotations.</p><sec id="s3-1"><title>Autonomy: AI&#x2019;s Role in Agency of People With PD</title><sec id="s3-1-1"><title>Overview</title><p>The overall feeling was that AI has the potential to facilitate or hinder autonomy depending on how it is implemented and adopted. Participants mostly expressed ways in which AI could increase autonomy through meaningful and personalized insights and respective actions to take, both independently and in collaboration with a clinician. However, if the tools were to be implemented as strictly clinician-facing tools, they would reduce rather than facilitate the autonomy of people with PD.</p></sec><sec id="s3-1-2"><title>AI Could Increase One&#x2019;s PD Knowledge and Understanding</title><p>Participants believed AI could be leveraged to increase their knowledge of their general health and PD&#x2014;a crucial part of autonomy&#x2014;through assisted self-tracking. 
Access to greater insights into one&#x2019;s PD enables people with PD to make choices that align with their values, goals, and individual needs. Those who already used digital health tools to monitor parameters such as sleep and heart rate expressed a desire to track PD-specific symptoms as well. Those who had tracked PD symptoms were interested in improved tracking and additional symptom monitoring.</p><disp-quote><p>[I&#x2019;d want feedback on] tremor. When do I have tremor, what severity? My gait, like do I walk faster in the morning or in the afternoon? Ideally, if possible, linked to data that I would probably have to input myself like, am I stressed or tired? Am I hungry? Did I exercise today?</p><attrib>Female, person with PD 04, FG1</attrib></disp-quote><p>Participants hoped that AI could find patterns in signs and symptoms that they and their health care professionals (HCPs) could not. The advanced analyses of AI could help explain why fluctuations and specific symptoms occur and in relation to medication, menstrual cycle, and lifestyle.</p><disp-quote><p>I recently had a couple of allergic reactions, and I had to get cortisone shots. I noticed straight after &#x2026; that my tremor was much better, and I felt better overall. That&#x2019;s interesting. So I googled histamine cortisone shots &#x2026; it turned out that a lot of people searched for antihistamines and cortisone and Parkinson&#x2019;s. So that&#x2019;s just an example of the type of information that I feel is out there</p><attrib>Female, person with PD 04, interview</attrib></disp-quote><p>To maximize value for people with PD, AI should provide personalized insights, considering variations in an individual&#x2019;s characteristics (eg, their habits) and over time (eg, injury or diet changes), as well as disease characteristics. 
Building on self-tracking analyses, participants were interested in receiving an AI-generated prognosis, though the level of detail desired and the value attributed varied between participants. Some wanted to know a timeline and expected rate of progression to feel less in the dark about the future and to, for example, plan retirement.</p><disp-quote><p>I would think that living in uncertainty, just knowing I have this and this and it&#x2019;s inevitable, it will get worse, but I don&#x2019;t know if it&#x2019;s in six months, or six years or 16 years, that to me after careful evaluation is not a good scenario.</p><attrib>Male, person with PD 03, FG1</attrib></disp-quote><p>As one of the AI tools exemplified within the project was for PD risk prediction, participants reflected on the period predating their PD symptoms and diagnosis. Between them, they described receiving their PD diagnosis anywhere from 2 months to 3 years after first seeking care for symptoms, which most felt was on the faster side for PD. However, some participants felt that AI has the potential to shorten this time of uncertainty. A participant who first received anxiety medication (without relief) for his PD symptoms expressed that &#x201C;... if I had directed my attention to the symptoms at that time, and if there were an app that could identify the symptoms, I think it would have been faster to identify the disease&#x201D; (male, person with PD 02, FG1). Several participants reflected that, as with a prognosis, they would have liked to have found out sooner that they had PD, as it would have allowed them to prepare:</p><disp-quote><p>Those two aspects, the possibilities of getting the disease, and predicting the rate of progression, I think that&#x2019;s very important. 
It&#x2019;s very important for us with Parkinson&#x2019;s to have an idea, even in order to then prepare the future.</p><attrib>Male, person with PD 02, interview</attrib></disp-quote></sec><sec id="s3-1-3"><title>AI Results Must Be Actionable</title><p>Participants wanted more information about their PD patterns and prognosis, so long as it was <italic>actionable</italic>, whether that be preparing mentally or changing lifestyle practices. It was mentioned that many of the prodromal symptoms could easily be alleviated if detected and provided with feedback, such as &#x201C;<italic>constipation, handwriting, carrying cups of tea ... eating soup with a spoon, you know, practical things</italic>&#x201D; (Female, person with PD 11, FG1). There was interest in AI-generated trajectories demonstrating pathways based on current lifestyle factors and alternative pathways based on assumed changes.</p><disp-quote><p>If AI can tell us specifically that if I do some things, then in 2-3 years, I&#x2019;ll be at this point, and if I don&#x2019;t, I&#x2019;ll be at that point, I think that will be very motivating to help people change attitudes to the treatments of the disease.</p><attrib>Male, person with PD 02, interview</attrib></disp-quote><p>Although opinions were divided, some people with PD felt that a risk score or disease prediction could motivate disease-altering behavior change, such as diet and exercise.</p><disp-quote><p>&#x2026; predicting the diagnosis can be very helpful if it helps people to change their behavior, because they know that they&#x2019;re in the race to get the disease. 
And if we think well, before the first symptoms, we already had the disease 10, 15, 20 years before.</p><attrib>Male, person with PD 02, interview</attrib></disp-quote><p>A participant with genetic PD wondered whether, had she been provided with AI-assisted monitoring, she and her physician could have identified her prodromal symptoms more easily and received lifestyle advice to delay or even prevent PD onset.</p><disp-quote><p>My mother has Parkinson&#x2019;s. I&#x2019;ve had low blood pressure all my life, which is also a symptom. I was constantly tired for years. And if the doctor had had a bit of knowledge of Parkinson&#x2019;s, maybe she could have said, &#x201C;you know, you have some risk. Maybe you can take a genetic test&#x201D; &#x2026; if I&#x2019;d known that I had a genetic mutation, maybe I could have started exercising often as a prevention of the motor symptoms or delayed the motor symptoms &#x2026; or taken better care of myself the moment I started to feel the first symptoms.</p><attrib>Female, person with PD 08, interview</attrib></disp-quote><p>It was noted that one must be &#x201C;very healthy mentally&#x201D; to become motivated rather than worried. Some participants felt that knowing one&#x2019;s prognosis would do little or nothing for autonomy, given that PD has no cure.</p></sec><sec id="s3-1-4"><title>Sharing AI-Generated Data</title><p>The potential applications and benefits of AI in health care are fundamentally dependent on ensuring that the data being collected are accessible to the end users. Participants emphasized their right to be informed about what aspects of their health AI systems are tracking, as well as the necessity of data interpretability to maintain autonomy in their care. If such data were withheld from patients and exclusively provided to clinicians, it would undermine the ability of people with PD to actively engage in the interpretation and management of their condition. 
Furthermore, the presentation of data in an overly complex or opaque manner would similarly impede patient autonomy by limiting their capacity to make informed decisions about their care.</p><disp-quote><p>Facilitator: ...would it make a difference if the information was presented only to the doctor? Or if it was presented to the user themselves?</p><p>Person with PD (male): I would like it to be presented to me first, together with a really well-thought-out analysis of what this means and what the potential developments are &#x2026; I&#x2019;m doing all these tests and I know that the results are sent to my doctor, and that might not return to me. Everything might be misinterpreted or something. I would like to have control over that.</p></disp-quote><p>To enhance interpretability, participants preferred trends and visualizations over raw numerical data. Beyond fostering greater autonomy in managing their PD, they emphasized that AI should be designed to promote a collaborative partnership between HCPs and people with PD. AI-assisted monitoring could extend insights beyond sporadic clinical visits, capturing meaningful data in a structured format. This, in turn, could enable more focused and relevant discussions during consultations, ensuring that both parties can prioritize what truly matters in patient care.</p></sec><sec id="s3-1-5"><title>Limitations of Greater Information</title><p>Participants expressed some nuanced opinions about the limitations of autonomy; however, this was especially true for individuals without symptoms. Individuals who perceive themselves as healthy may see little value in assessing their risk for a chronic illness.</p><disp-quote><p>Are [people at risk] going to want to know if they are sick anyway? We have an audience who have Parkinson&#x2019;s, who are not interested at all in investigating what&#x2019;s going to happen to them. And now we&#x2019;re talking about a client population who haven&#x2019;t even gotten it yet. 
So, we are searching for a statement of needs and wants for a community that would find it very hard themselves to set the parameters for this project.</p><attrib>Male, person with PD 10, FG2</attrib></disp-quote><p>Additionally, the addictive nature of digital gadgets and personalized summaries may cost some people autonomy:</p><disp-quote><p>people with Apple watches in their life, they&#x2019;re all obsessed about, &#x201C;Oh, look, my watch told me I slept really well last night.&#x201D; It&#x2019;s like, &#x201C;don&#x2019;t you know that yourself?&#x201D;</p><attrib>Female, person with PD 04, FG1</attrib></disp-quote><p>It was also noted that the participants, &#x201C;by virtue of us being on this call,&#x201D; were willing and able to participate in a panel and are likely <italic>&#x201C;</italic>the cohort that wants to implement things and make it the best journey&#x201D; (female, person with PD 11, FG1). However, such AI tools and methods would likely not be equally valuable for all people with PD.</p></sec></sec><sec id="s3-2"><title>Beneficence: Maximizing AI&#x2019;s Positive Impact</title><p>Participants identified multiple ways in which AI could offer benefits to their health, particularly by improving data collection, optimizing clinical care, and enabling early detection and intervention.</p><sec id="s3-2-1"><title>AI Could Optimize Data Collection and Analysis</title><p>In terms of data collection and analysis methods, participants tended to express that AI would be necessary to advance the understanding of their condition. 
Participants highlighted that AI tools enable automated, continuous data collection and can analyze vast datasets, reducing the burden on both patients and HCPs.</p><disp-quote><p>&#x2026; you have lots of data about symptoms and side effects and &#x2026; so on &#x2026; something that is very hard for individual people to take in, analysis, but &#x2026; that&#x2019;s exactly what machine learning is good at, both detecting patterns in complex sets of data &#x2026; but also in using this data continuously, so that you can predict what is going to happen &#x2026; which should be very important information for the healthcare sector...</p><attrib>Male, person with PD 03, interview</attrib></disp-quote><p>AI&#x2019;s ability to detect patterns and correlations beyond human capability is seen as a key advantage&#x2014;for example, synthesizing anecdotal online information or scanning extensive patient records to uncover insights. By capturing and analyzing large datasets, AI can unlock valuable real-world data that traditional in-clinic assessments often overlook, transforming previously &#x201C;trapped&#x201D; information into actionable knowledge.</p></sec><sec id="s3-2-2"><title>AI Could Support HCPs With Improved and Timely Care</title><p>Participants felt AI could enhance beneficence by helping HCPs make better-informed decisions and personalizing PD treatment. Longitudinal monitoring and predictive-AI would provide additional data and thus insights for HCPs between the short, infrequent visits. PD care has long followed the same &#x201C;gold standard,&#x201D; and AI could enable more personalized and efficient (self-)care decisions, something participants felt was long overdue. Such insights could improve medication adjustments, detect emerging symptoms, avoid complications, and optimize interventions such as deep brain stimulation tuning and treatment for comorbidities. 
AI-observed trends could be triangulated with the HCPs&#x2019; observations for a more holistic and accurate assessment, especially since PD can be observed differently even between 2 neurologists, participants noted: &#x201C;[AI] is a tool. I don&#x2019;t think it should be used to substitute doctors and people, but can be used in a clever way&#x201D; (female, person with PD 08, interview).</p><p>Participants stressed that AI could never replace the value of a human but could reduce the number of visits and automate or eliminate parts of HCPs&#x2019; work. Participants also expressed hope that monitoring could facilitate early detection of new symptoms and medication side effects, such as dyskinesia, to allow early intervention and avoid certain symptoms and side effects. Therefore, although not viewed as attractive for persons at risk, participants felt a risk score might be valuable for HCPs to monitor relatives of people with genetic PD and intervene when necessary.</p><disp-quote><p>[AI]&#x2019;s not giving you a diagnosis; it&#x2019;s giving you a risk score &#x2026; a warning that you should be aware of that. So, whether that comes from an app, or a conference or a doctor &#x2026; It&#x2019;s just telling you to be careful because you have a high risk of bearing the disease. It&#x2019;s not telling you you&#x2019;re going to have the disease.</p><attrib>Male, person with PD 02, FG1</attrib></disp-quote><p>Another provocative suggestion was to use AI to predict an individual&#x2019;s psychological response to an AI-generated risk score&#x2014;an idea that paradoxically both acknowledges AI&#x2019;s risks and expresses trust in its ability to mitigate them.</p></sec></sec><sec id="s3-3"><title>Nonmaleficence: &#x201C;Do No Harm&#x201D;</title><sec id="s3-3-1"><title>Overview</title><p>A major ethical concern of using predictive AI for PD was perceived to be psychological harm, especially regarding risk prediction. 
Participants emphasized that HCPs must carefully balance potential harms against anticipated benefits when integrating AI into PD care. They underscored the importance of involving people with PD in the design phase to mitigate risks and minimize potential harm. Ensuring patient participation in AI development was seen as crucial for aligning technological advancements with patient needs, thereby enhancing both safety and usability.</p></sec><sec id="s3-3-2"><title>Potential Harm of Knowing</title><p>Participants worried that receiving a negative prognosis, or (especially for someone who was otherwise healthy) receiving a high-risk score, could carry grave consequences. There was a fear of causing depression or anxiety, and an early diagnosis might do more harm than good, given that PD is incurable.</p><disp-quote><p>Do you really want to know that it might be PD at this stage? Perhaps not. You should think about how you communicate [AI-generated predictions]. What would be the benefit if you could identify Parkinson&#x2019;s or some other neurological disorder at a very early stage?</p><attrib>Male, person with PD 03, FG1</attrib></disp-quote><p>Reflecting on their own diagnosis experiences, which were with a human HCP who could still deliver information better than a digital tool, participants stressed that one&#x2019;s reaction to such news is hard to predict.</p><disp-quote><p>I&#x2019;m highly concerned over the thought of an app presenting somebody with the news, &#x201C;Oh, congratulations, you probably will develop Parkinson&#x2019;s,&#x201D; when I remember how I felt when the doctor said &#x2026; &#x201C;I&#x2019;m so sorry I was so direct, you seemed very calm and able to process the information.&#x201D; I was like, I&#x2019;m totally calm until somebody tells me I have a life-changing disease, you know?</p><attrib>Female, person with PD 04, FG1</attrib></disp-quote><p>Participants hoped for future research advancements leading to improved treatment 
options and maybe even a cure, at which point knowing one&#x2019;s risk would be worth the stress. However, as little or no action can be taken at this point, several felt that, currently, the harm of knowing outweighs the benefit.</p><disp-quote><p>A friend of mine, who I hadn&#x2019;t spoken to in a few years asked me, &#x201C;So what&#x2019;s your prognosis? How are you going to end up in 10 years?&#x201D; I had no idea. Nobody can tell me. Maybe that&#x2019;s a good thing.</p><attrib>Female, person with PD 04, interview</attrib></disp-quote><p>Another participant reflected: &#x201C;[Those] who might be more motivated [to monitor risk] are people who know people with Parkinson&#x2019;s, who would equally be more terrified of finding out they&#x2019;ve got it&#x201D; (female, person with PD 11, FG1). Others worried that widespread AI-based predictions might contribute to increased medical anxiety or hypochondria, driving unnecessary health care utilization. &#x201C;I think [AI-generated risk prediction] might attract the wrong sort of people ... the people who always think that they have everything, they might be the ones using it, but not the ones you really want to target&#x201D; (female, person with PD 09, FG2).</p><p>Beyond psychological harm, participants raised concerns about the broader social and financial consequences of AI-driven risk prediction. A risk score could influence insurance policies, employment opportunities, or retirement plans, potentially leading to discrimination or financial instability. Participants had previously found benefits in monitoring and self-tracking but also reported experiencing the burden this can pose: &#x201C;Anything you have to do every day, even if it only takes one minute, it interrupts your daily routine&#x201D; (male, person with PD 03, FG1). Time and motivation vary by individual and even over time for the same individual. 
This also applies to knowing AI predictions; some want to know everything about their health, while others are stressed by it. Individual and situational characteristics must be weighed in each case so as not to cause more harm than good when implementing predictive AI. Passive data collection, such as through a smartwatch or monitoring of mobile typing, was mentioned as positive means of lessening this burden while also collecting the desired data.</p></sec><sec id="s3-3-3"><title>Importance of Guidelines for Implementation</title><p>Participants stressed that diagnoses or risk scores should only be delivered by trained professionals, never directly by an app: &#x201C;We all hear these horror stories from diagnosis meetings with your doctor, or how they treat you&#x201D; (male, person with PD 03, FG1). Some had experienced distressing diagnostic encounters, such as being told their symptoms could indicate either brain cancer or PD, with no clarification until after the weekend. This raised concerns that AI-driven predictive models might similarly communicate results in an alarming way. Participants also questioned the accuracy of AI predictions, emphasizing that specialists should assess and contextualize results before disclosure. To prevent harm, clear guidelines must be established on delivering sensitive, life-altering information. As one participant put it: &#x201C;You just don&#x2019;t know how people are going to react. Ideally, this should always be done by a trained human professional&#x201D; (female, person with PD 04, FG1). 
Guidelines for HCPs when delivering such results &#x201C;should have been done a long time ago.&#x201D;</p></sec></sec><sec id="s3-4"><title>Justice: Fair and Equitable Use of AI</title><p>While participants felt they could potentially benefit from predictive AI and personalized monitoring, they also expressed concerns that not everyone has equal access to the resources necessary to implement AI-generated recommendations.</p><disp-quote><p>I&#x2019;m convinced the reason I&#x2019;m doing semi-okay after 12 years [with PD] is, among other things, lifestyle, exercise, nutrition, and lack of stress. But I&#x2019;m also acutely aware that that&#x2019;s not a solution for a young father with three kids and a full-time job, or somebody living in Uganda, or, yeah, most of this plan is in fact, saying I do yoga every morning and try to cut down on stress. It&#x2019;s very much a first-world solution to the problem. Though, finding universally applicable ways of slowing progression would be a personal priority of mine.</p><attrib>Female, person with PD 04, interview</attrib></disp-quote><p>Tools such as predictive AI, it was perceived, could further increase inequalities with regard to autonomy and beneficence among different individuals in PD care. Another question of fairness was related to the resources that AI requires and whether the added benefit of such a toolkit as this would outweigh the cost.</p><disp-quote><p>If [patients] don&#x2019;t want to know, well, then that&#x2019;s their personal choice. 
But I think if they don&#x2019;t want to know anyways, then there might be no use in using AI and wasting resources for someone who doesn&#x2019;t want to know.</p><attrib>Female, person with PD 09, interview</attrib></disp-quote></sec><sec id="s3-5"><title>Trust, Truthfulness, and Confidentiality: Transparency and Reliability of AI</title><p>Privacy and confidentiality were key concerns among participants, who emphasized the need for control over their own data, including the ability to choose who can access their health information&#x2014;whether care partners, HCPs, or third parties: &#x201C;I live in Germany where people are, I would go so far as to say, obsessed about data protection&#x201D; (female, person with PD 09, FG1). Participants stressed the need for transparency in data collection, storage, and sharing practices, ensuring that personal health data remain protected and used only with explicit consent.</p><disp-quote><p>I have a genetic mutation. When I got my results, my doctor put it in a sealed envelope and said to save it very carefully, &#x201C;this is the most important information you can have.&#x201D; And I actually didn&#x2019;t even think too much about it. But I think for a lot of people, genetic information is important. It&#x2019;s extremely important and private. I think it would be important for [predictive AI] to know what kind of mutation I have, but I would also be afraid that others would know it or would misuse it.</p><attrib>Female, person with PD 08, FG2</attrib></disp-quote><p>Building trust in AI-driven PD tools requires both transparency and user control. Participants were hesitant about whether neurologists could accurately interpret their data without their input. They emphasized that people with PD should have access to their data first, before any report is sent to HCPs, ensuring transparency and patient involvement in interpretation. 
Participants questioned how accurate or &#x201C;truthful&#x201D; AI predictions could be, given the extreme complexity and variability of PD, although they were still interested in the potential insights AI could provide. &#x201C;I would like to know as much [of the AI-predictions] as possible, but I also want to know the accuracy of those predictions&#x201D; (male, person with PD 03, FG1). Another participant felt confident enough in her disease stability to be interested in an AI-generated prognosis:</p><disp-quote><p>I personally would like to know ... [though] I&#x2019;m feeling more secure in my diagnosis seven years down the line, having seen how I&#x2019;ve used the PRO-PD score, and I&#x2019;ve been monitoring and I&#x2019;m staying relatively stable, and I&#x2019;m happy with it</p><attrib>Female, person with PD 11, FG1</attrib></disp-quote><p>To increase trust and perceived value, participants wanted clear explanations of how the AI models work, their limitations, and their accuracy rates so as to facilitate interpretation rather than misleading.</p><disp-quote><p>When we deploy [these AI tools], it is really important to emphasize how the experience of every single user is included, this is one of the pillars of AI &#x2026; Because once you realize that people&#x2019;s experiences are taken into account, then people will trust it for sure.</p><attrib>Male, person with PD 12, interview</attrib></disp-quote></sec><sec id="s3-6"><title>Additional Reflections</title><p>Beyond bioethical concerns, participants highlighted practical challenges, such as cultural differences across the EU, which could complicate AI implementation:</p><disp-quote><p>... 
the intellectual part of this challenge is how do you construct a model of care that it can dovetail with well, sometimes absolutely, utterly different [healthcare systems] and different resourcing situations?</p><attrib>Male, person with PD 10, interview</attrib></disp-quote></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>Participants expressed both potential benefits and ethical concerns of implementing AI in PD management. While optimistic that AI could enhance autonomy and beneficence by capturing one&#x2019;s unique health patterns, motivating behavior change, and enabling shared decision-making, concerns arose over patient involvement, privacy, and the psychological impact of predictions. Risk prediction, PD prognosis, and medication response prediction were perceived differently in terms of value and ethical considerations, with risk prediction seen as the most ethically controversial, given that people with PD often have come to terms with their diagnosis in a way that those at risk would not have. Participants cautioned that AI could widen health care disparities and emphasized the need for transparency, user control, and the involvement of people with PD in development to ensure ethical implementation.</p><p>Consistent with others&#x2019; findings, participants felt that longitudinal monitoring and predictive AI have the potential to improve understanding and communication for both people with PD and HCPs, ultimately leading to more personalized and appropriate (self-)care decisions [<xref ref-type="bibr" rid="ref14">14</xref>]. 
However, participants worried about inconsistencies in the interpretation of data between people with PD and HCPs, with one participant suggesting to see and approve AI predictions before sharing them with their HCP.</p><p>Participants, in alignment with previous research [<xref ref-type="bibr" rid="ref22">22</xref>], were interested in AI solutions that provide actionable insights, as this increases autonomy over one&#x2019;s health. A review of technologies within PD cites multiple examples of how providing feedback from the digital monitoring elicited positive outcomes for the people with PD, compared to when the information was exclusively for HCPs [<xref ref-type="bibr" rid="ref12">12</xref>]. This becomes more complex, however, with the addition of predictions. Participants were interested in AI-generated disease pathways and alternatives based on interventions, although not without skepticism. As others discuss, this may provide more false hope than benefit, given that there are currently no disease-modifying treatments, and even if such therapies become available, it may take many years to be individually tailored, given the heterogeneity of PD [<xref ref-type="bibr" rid="ref22">22</xref>]. This leads to concerns such as people with PD feeling like a failure after complying with an intervention without any noticeable difference. In addition to symptom- and disease-altering recommendations, a motivation for predictive AI noted in our study and by others was that having a better idea of one&#x2019;s future could allow them to prepare mentally and logistically (eg, retirement) [<xref ref-type="bibr" rid="ref22">22</xref>]. Given the gravity of personal predictions and the uncertainty of model accuracy, transparency in terms of what defines a given prediction or recommended intervention and the accuracy of the models is crucial, to allow people with PD to interpret the information<bold>&#x2014;</bold>with the help of their HCP. 
AI medication response prediction was viewed as the least ethically complicated and could allow participants together with their HCPs to optimize not only which medication to prescribe but also how to maximize its effects by identifying patterns.</p><p>Schaeffer et al [<xref ref-type="bibr" rid="ref9">9</xref>] surveyed experts on whether to disclose PD risk and how (ie, as a percentage/value or using general terms); however, research on this topic from the patient perspective is limited. Our study indicated that graphs and trends were preferred over raw numbers, which were not seen as interpretable or actionable. Consistent with our findings, the experts surveyed cautioned that risk predictions could likely result in psychological distress, especially when there are questions about the accuracy of the prediction [<xref ref-type="bibr" rid="ref9">9</xref>]. In alignment with previous findings, our participants stressed the importance of receiving AI insights from a trained HCP who can help interpret and guide them [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref22">22</xref>].</p><p>Participants alluded to the subject of overdiagnosis as a potential outcome of AI-assisted PD risk screening. Defined as &#x201C;making people patients unnecessarily,&#x201D; overdiagnosis involves a diagnosis that meets all diagnostic criteria (not misdiagnosis) but causes the patient more harm than benefit [<xref ref-type="bibr" rid="ref23">23</xref>]. A type of overdiagnosis is <italic>overdetection</italic>, or the identification of abnormalities that resolve themselves, never progress into something harmful, or else progress slowly enough that they never cause harm during a person&#x2019;s remaining lifetime [<xref ref-type="bibr" rid="ref23">23</xref>]. 
This question regarding the value of early detection, in this case of prodromal PD, when symptoms may be minimal or unnoticed and the prognosis of the complex disease is uncertain, shows the relevance of the &#x201C;right <italic>not</italic> to know.&#x201D;</p><p>While participants stressed their &#x201C;right to know&#x201D; any analyses of their health data, they also highlighted the potential psychological damage and other unintentional consequences that could arise, and hence the &#x201C;right <italic>not</italic> to know.&#x201D; This ethical dilemma is discussed by Schaeffer et al [<xref ref-type="bibr" rid="ref9">9</xref>] in the context of disclosing PD risk to patients with RBD, in which they state that &#x201C;there is not only a right to know, there is also the &#x2018;right <italic>not</italic> to know.&#x2019;&#x201D; Davies [<xref ref-type="bibr" rid="ref24">24</xref>] outlines 2 main arguments for the &#x201C;right <italic>not</italic> to know&#x201D; as to respect autonomy and avoid harm; when a diagnosis can lead to distress and social stigma, and when there is no effective cure, the diagnosis may not be worth it. However, in a response to Schaeffer et al, Karagianis [<xref ref-type="bibr" rid="ref25">25</xref>] points out that in order to consent to not knowing, one must first understand that risk (in this case, a patient with RBD knowing their heightened risk for neurodegenerative disease) and consent to being monitored but without knowing any resulting risk scores. Davies [<xref ref-type="bibr" rid="ref24">24</xref>] highlights an additional complexity: because PD can be genetically inherited, learning one&#x2019;s own genetic information may inadvertently disclose information about family members who have not consented to receive it. Whether to disclose such information can become a moral burden for the patient receiving the information. 
An interesting finding was that a participant wondered if AI could be used to predict how someone might react to an AI health prediction, essentially using the problem as the solution.</p></sec><sec id="s4-2"><title>Future Research and Implications</title><p>There is no question that AI is an increasingly important topic in health care. The 2024 Revision to the Declaration of Helsinki for ethical medical research [<xref ref-type="bibr" rid="ref26">26</xref>] is a reflection of this trend and provides concrete focus areas that directly shape future research priorities for AI in medicine, including improved AI literacy and clarity regarding the risks of current and future AI applications. These international principles have direct implications for the use of AI in PD. For instance, the declaration&#x2019;s emphasis on protecting participant data and establishing ethical data governance speaks directly to the primary concerns identified in AI health monitoring for PD, such as privacy, data ownership, and the potential for stigmatization or discrimination based on AI-generated health predictions. Future research must therefore prioritize creating frameworks that address who controls the vast amounts of personal health data collected by monitoring systems and how the data can be used without harming the individual&#x2019;s social and economic well-being. Furthermore, the revised declaration highlights the need for transparency and accountability, which aligns with the FUTURE-AI framework&#x2019;s principles of traceability and explainability. This is critical in PD, where disclosing information about future disease risk is already fraught with ethical complexity due to prognostic uncertainty and the lack of disease-modifying therapies. 
The FUTURE-AI Framework, developed by interdisciplinary experts from 50 countries to ensure trustworthy and ethical implementation of AI in health care, warns of many of the same ethical concerns, such as privacy, transparency, potential patient harm, and driving inequities. The framework provides 6 guiding principles: fairness, universality, traceability, usability, robustness, and explainability to guide best practice for the development and implementation of AI in health care [<xref ref-type="bibr" rid="ref27">27</xref>]. This is especially important, as the increased adoption of AI could likely introduce new challenges and responsibilities for HCPs, in addition to avoiding patient harm.</p><p>Additionally, it is imperative that future research explores how these ethical considerations are influenced by diverse global contexts, including different health care systems, cultural attitudes toward technology, and varying levels of health care access. The ethical frameworks guiding AI development, such as the Declaration and the FUTURE-AI framework, are intended to be international. However, their practical application will inevitably vary.</p><p>Furthermore, AI-assisted decision-making raises questions about accountability; while human error is an expected risk in clinical practice, it is unclear if AI-driven errors are expected or acceptable. For example, it remains unclear to what extent HCPs should be held responsible for mistakes arising from AI-driven recommendations. Despite these challenges, AI could offer valuable insights into how people with PD are doing between the infrequent and brief clinical visits and foster self-tracking and self-care for a more collaborative approach to care.</p></sec><sec id="s4-3"><title>Strengths and Limitations</title><p>There are strengths and limitations to consider when interpreting the results. The panel&#x2019;s reflections were based on hypotheticals, and ahead of a prototype, making them more general. 
People with PD discussed the risk prediction tools in the context of their diagnosis journey, which may impact their perspective, but this also mitigates the ethical concerns of asking and worrying people actually at risk. Another challenging but interesting aspect of this study is that it focused on estimating PD risk and prognosis as well as medication response for people with PD. This made it at times difficult to separate which tools and predictions participants referred to.</p><p>We did not explicitly ask about ethics in either form of data collection, so it is possible that the participants could have additional thoughts on ethical considerations. The topic, instead, emerged naturally among the participants, which is also a strength, as it was unsolicited, highlighting the importance of ethics in such a context. Another consideration is that the data included questions centered around the AI-PROGNOSIS project specifically, as well as about AI in health care and research in general. This was considered in the analysis process, and a project example was in many cases useful to structure and focus the conversation. In some cases, participants spoke of all AI as one, making it unclear as to whether they were referencing predictive or perhaps generative AI, which may reflect the &#x201C;black box&#x201D; behind AI and indicate a need for user education when implementing such tools.</p><p>It is a strength that our participants represent multiple European countries; however, there are limitations to consider regarding the transferability of our findings. Paccoud et al [<xref ref-type="bibr" rid="ref28">28</xref>] found that user preferences varied across EU countries and clinical and sociodemographic factors, though our participants did not mention many cultural factors. 
Our participants, being on an expert panel, likely differ from the average people with PD in many ways, given that they are well educated, quite involved in their own PD management, as well as PD awareness or advocacy. Many were also quite tech-savvy and had self-tracked in some manner prior, suggesting that this group might be more open toward AI but might also have more specific and well-thought-out feedback. Additionally, several of our participants have young-onset PD, and the mean age at the time of this study was 53.5 (SD 11.4), younger than the average people with PD. Older or cognitively impaired people with PD may encounter more barriers to navigating and interpreting the AI feedback and may not engage with the technology as much as our participants wish to. A recent survey on willingness to use digital medical devices among people with PD in Europe found that willingness decreased with age [<xref ref-type="bibr" rid="ref28">28</xref>]. Another study found a strong significant correlation between cognitive ability and perceived ability to use everyday technology among people with PD [<xref ref-type="bibr" rid="ref29">29</xref>]. At the same time, it may be considered a strength that our study provides insights into the ethical considerations of disclosing risk and progression to middle-aged individuals without cognitive impairment, some of whom are still working, for whom a prediction of disease progression may represent a more pronounced shock or harsher news.</p><p>Finally, given that our participants were Caucasian and from high-income countries, their experiences and perspectives may be very different from those from minority groups and of low- and middle-income countries, as one of our participants pointed out.</p></sec><sec id="s4-4"><title>Conclusions</title><p>Our findings highlight the need to balance hope and harm in the application of AI in PD management. 
We outline potential benefits for people with and without PD, HCPs, and research, as well as important concerns from the perspective of people with PD. Involving people with PD in development going forward is crucial for aligning technological advancements with patient needs and ensuring the successful and ethical adoption of AI in PD care.</p></sec></sec></body><back><ack><p>We thank the participants for their time and insights. We also thank Ali Saad for providing the introduction to artificial intelligence prior to the focus groups.</p></ack><notes><sec><title>Funding</title><p>This research is part of the AI-PROGNOSIS project funded by the European Union (EU) under Grant Agreement No. 101080581. Views and opinions expressed are, however, those of the authors and do not necessarily reflect those of the EU or European Health and Digital Executive Agency. Neither the EU nor the European Health and Digital Executive Agency can be held responsible for these views and opinions.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization occurred in collaboration between SR, MH, TSD, CB, and JLL, with all authors discussing and guiding the study direction. SR, AC, and JLL recruited participants, JLL conducted the interviews, and SR and AC facilitated the focus groups. JLL transcribed and analyzed the data with input from CB and SR, who each provided key expertise. JLL prepared the first draft, including figures and writing. All authors contributed revisions, especially SR and CB. 
All authors read and approved the final manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">EU</term><def><p>European Union</p></def></def-item><def-item><term id="abb3">FG</term><def><p>focus group</p></def></def-item><def-item><term id="abb4">HCP</term><def><p>health care professional</p></def></def-item><def-item><term id="abb5">PD</term><def><p>Parkinson disease</p></def></def-item><def-item><term id="abb6">RBD</term><def><p>rapid eye movement sleep behavior disorder</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Albin</surname><given-names>R</given-names> </name><name name-style="western"><surname>Grotewold</surname><given-names>N</given-names> </name></person-group><article-title>What is the Parkinson pandemic?</article-title><source>Mov Disord</source><year>2023</year><month>12</month><volume>38</volume><issue>12</issue><fpage>2141</fpage><lpage>2144</lpage><pub-id pub-id-type="doi">10.1002/mds.29637</pub-id><pub-id pub-id-type="medline">37859586</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tolosa</surname><given-names>E</given-names> </name><name name-style="western"><surname>Garrido</surname><given-names>A</given-names> </name><name name-style="western"><surname>Scholz</surname><given-names>SW</given-names> </name><name name-style="western"><surname>Poewe</surname><given-names>W</given-names> </name></person-group><article-title>Challenges in the diagnosis of Parkinson&#x2019;s disease</article-title><source>Lancet 
Neurol</source><year>2021</year><month>05</month><volume>20</volume><issue>5</issue><fpage>385</fpage><lpage>397</lpage><pub-id pub-id-type="doi">10.1016/S1474-4422(21)00030-2</pub-id><pub-id pub-id-type="medline">33894193</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chopra</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lang</surname><given-names>AE</given-names> </name><name name-style="western"><surname>H&#x00F6;glinger</surname><given-names>G</given-names> </name><name name-style="western"><surname>Outeiro</surname><given-names>TF</given-names> </name></person-group><article-title>Towards a biological diagnosis of PD</article-title><source>Parkinsonism Relat Disord</source><year>2024</year><month>05</month><volume>122</volume><fpage>106078</fpage><pub-id pub-id-type="doi">10.1016/j.parkreldis.2024.106078</pub-id><pub-id pub-id-type="medline">38472075</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yuan</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Artificial intelligence-enabled detection and assessment of Parkinson&#x2019;s disease using nocturnal breathing signals</article-title><source>Nat Med</source><year>2022</year><month>10</month><volume>28</volume><issue>10</issue><fpage>2207</fpage><lpage>2215</lpage><pub-id pub-id-type="doi">10.1038/s41591-022-01932-x</pub-id><pub-id pub-id-type="medline">35995955</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Aghanavesi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bergquist</surname><given-names>F</given-names> </name><name name-style="western"><surname>Nyholm</surname><given-names>D</given-names> </name><name name-style="western"><surname>Senek</surname><given-names>M</given-names> </name><name name-style="western"><surname>Memedi</surname><given-names>M</given-names> </name></person-group><article-title>Motion sensor-based assessment of Parkinson&#x2019;s disease motor symptoms during leg agility tests: results from levodopa challenge</article-title><source>IEEE J Biomed Health Inform</source><year>2020</year><month>01</month><volume>24</volume><issue>1</issue><fpage>111</fpage><lpage>119</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2019.2898332</pub-id><pub-id pub-id-type="medline">30763248</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Broeder</surname><given-names>S</given-names> </name><name name-style="western"><surname>Roussos</surname><given-names>G</given-names> </name><name name-style="western"><surname>De Vleeschhauwer</surname><given-names>J</given-names> </name><name name-style="western"><surname>D&#x2019;Cruz</surname><given-names>N</given-names> </name><name name-style="western"><surname>de Xivry</surname><given-names>JJO</given-names> </name><name name-style="western"><surname>Nieuwboer</surname><given-names>A</given-names> </name></person-group><article-title>A smartphone-based tapping task as a marker of medication response in Parkinson&#x2019;s disease: a proof of concept study</article-title><source>J Neural Transm (Vienna)</source><year>2023</year><month>07</month><volume>130</volume><issue>7</issue><fpage>937</fpage><lpage>947</lpage><pub-id pub-id-type="doi">10.1007/s00702-023-02659-w</pub-id><pub-id 
pub-id-type="medline">37268772</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Postuma</surname><given-names>RB</given-names> </name><name name-style="western"><surname>Iranzo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Risk and predictors of dementia and parkinsonism in idiopathic REM sleep behaviour disorder: a multicentre study</article-title><source>Brain</source><year>2019</year><month>03</month><day>1</day><volume>142</volume><issue>3</issue><fpage>744</fpage><lpage>759</lpage><pub-id pub-id-type="doi">10.1093/brain/awz030</pub-id><pub-id pub-id-type="medline">30789229</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schaeffer</surname><given-names>E</given-names> </name><name name-style="western"><surname>Rogge</surname><given-names>A</given-names> </name><name name-style="western"><surname>Nieding</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Patients&#x2019; views on the ethical challenges of early Parkinson disease detection</article-title><source>Neurology</source><year>2020</year><month>05</month><day>12</day><volume>94</volume><issue>19</issue><fpage>e2037</fpage><lpage>e2044</lpage><pub-id pub-id-type="doi">10.1212/WNL.0000000000009400</pub-id><pub-id pub-id-type="medline">32291296</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schaeffer</surname><given-names>E</given-names> </name><name name-style="western"><surname>Toedt</surname><given-names>I</given-names> </name><name 
name-style="western"><surname>K&#x00F6;hler</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rogge</surname><given-names>A</given-names> </name><name name-style="western"><surname>Berg</surname><given-names>D</given-names> </name></person-group><article-title>Risk disclosure in prodromal Parkinson&#x2019;s disease</article-title><source>Mov Disord</source><year>2021</year><month>12</month><volume>36</volume><issue>12</issue><fpage>2833</fpage><lpage>2839</lpage><pub-id pub-id-type="doi">10.1002/mds.28723</pub-id><pub-id pub-id-type="medline">34351002</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morris</surname><given-names>HR</given-names> </name><name name-style="western"><surname>Spillantini</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Sue</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Williams-Gray</surname><given-names>CH</given-names> </name></person-group><article-title>The pathogenesis of Parkinson&#x2019;s disease</article-title><source>Lancet</source><year>2024</year><month>01</month><day>20</day><volume>403</volume><issue>10423</issue><fpage>293</fpage><lpage>304</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(23)01478-2</pub-id><pub-id pub-id-type="medline">38245249</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Padilha</surname><given-names>C</given-names> </name><name name-style="western"><surname>Souza</surname><given-names>R</given-names> </name><name name-style="western"><surname>Grossl</surname><given-names>FS</given-names> </name><name name-style="western"><surname>Gauer</surname><given-names>APM</given-names> </name><name name-style="western"><surname>de 
S&#x00E1;</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Rodrigues-Junior</surname><given-names>SA</given-names> </name></person-group><article-title>Physical exercise and its effects on people with Parkinson&#x2019;s disease: umbrella review</article-title><source>PLoS One</source><year>2023</year><volume>18</volume><issue>11</issue><fpage>e0293826</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0293826</pub-id><pub-id pub-id-type="medline">37917715</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Riggare</surname><given-names>S</given-names> </name><name name-style="western"><surname>Stamford</surname><given-names>J</given-names> </name><name name-style="western"><surname>H&#x00E4;gglund</surname><given-names>M</given-names> </name></person-group><article-title>A long way to go: patient perspectives on digital health for Parkinson&#x2019;s disease</article-title><source>J Parkinsons Dis</source><year>2021</year><volume>11</volume><issue>s1</issue><fpage>S5</fpage><lpage>S10</lpage><pub-id pub-id-type="doi">10.3233/JPD-202408</pub-id><pub-id pub-id-type="medline">33682728</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sendra</surname><given-names>A</given-names> </name><name name-style="western"><surname>Grosjean</surname><given-names>S</given-names> </name><name name-style="western"><surname>Bonneville</surname><given-names>L</given-names> </name></person-group><article-title>Co-constructing experiential knowledge in health: the contribution of people living with Parkinson to the co-design approach</article-title><source>Qual Health Commun</source><year>2022</year><month>01</month><day>25</day><volume>1</volume><issue>1</issue><fpage>101</fpage><lpage>121</lpage><pub-id 
pub-id-type="doi">10.7146/qhc.v1i1.124110</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ho</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bavli</surname><given-names>I</given-names> </name><name name-style="western"><surname>Mahal</surname><given-names>R</given-names> </name><name name-style="western"><surname>McKeown</surname><given-names>MJ</given-names> </name></person-group><article-title>Multi-level ethical considerations of artificial intelligence health monitoring for people living with Parkinson&#x2019;s disease</article-title><source>AJOB Empir Bioeth</source><year>2024</year><volume>15</volume><issue>3</issue><fpage>178</fpage><lpage>191</lpage><pub-id pub-id-type="doi">10.1080/23294515.2023.2274582</pub-id><pub-id pub-id-type="medline">37889210</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Beauchamp</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Childress</surname><given-names>JF</given-names> </name></person-group><source>Principles of Biomedical Ethics</source><year>2019</year><publisher-name>Oxford University Press</publisher-name><pub-id pub-id-type="other">9780190640873</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hagstr&#x00F6;m</surname><given-names>J</given-names> </name><name name-style="western"><surname>H&#x00E4;gglund</surname><given-names>M</given-names> </name><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name></person-group><article-title>Adolescent and parental proxy online record access: analysis of the empirical evidence based on four bioethical 
principles</article-title><source>BMC Med Ethics</source><year>2025</year><month>02</month><day>20</day><volume>26</volume><issue>1</issue><fpage>27</fpage><pub-id pub-id-type="doi">10.1186/s12910-025-01182-9</pub-id><pub-id pub-id-type="medline">39979965</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farhud</surname><given-names>DD</given-names> </name><name name-style="western"><surname>Zokaei</surname><given-names>S</given-names> </name></person-group><article-title>Ethical issues of artificial intelligence in medicine and healthcare</article-title><source>Iran J Public Health</source><year>2021</year><month>11</month><volume>50</volume><issue>11</issue><fpage>i</fpage><lpage>v</lpage><pub-id pub-id-type="doi">10.18502/ijph.v50i11.7600</pub-id><pub-id pub-id-type="medline">35223619</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reven&#x00E4;s</surname><given-names>&#x00C5;</given-names> </name><name name-style="western"><surname>Hvitfeldt Forsberg</surname><given-names>H</given-names> </name><name name-style="western"><surname>Granstr&#x00F6;m</surname><given-names>E</given-names> </name><name name-style="western"><surname>Wannheden</surname><given-names>C</given-names> </name></person-group><article-title>Co-designing an eHealth service for the co-care of Parkinson disease: explorative study of values and challenges</article-title><source>JMIR Res Protoc</source><year>2018</year><month>10</month><day>30</day><volume>7</volume><issue>10</issue><fpage>e11278</fpage><pub-id pub-id-type="doi">10.2196/11278</pub-id><pub-id pub-id-type="medline">30377143</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Serrano</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Larsen</surname><given-names>F</given-names> </name><name name-style="western"><surname>Isaacs</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Participatory design in Parkinson&#x2019;s research with focus on the symptomatic domains to be measured</article-title><source>J Parkinsons Dis</source><year>2015</year><volume>5</volume><issue>1</issue><fpage>187</fpage><lpage>196</lpage><pub-id pub-id-type="doi">10.3233/JPD-140472</pub-id><pub-id pub-id-type="medline">25588357</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thompson</surname><given-names>J</given-names> </name></person-group><article-title>A guide to abductive thematic analysis</article-title><source>Qual Rep</source><year>2022</year><volume>27</volume><issue>5</issue><fpage>1410</fpage><lpage>1421</lpage><pub-id pub-id-type="doi">10.46743/2160-3715/2022.5340</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Tavory</surname><given-names>I</given-names> </name><name name-style="western"><surname>Timmermans</surname><given-names>S</given-names> </name></person-group><source>Abductive Analysis: Theorizing Qualitative Research</source><year>2014</year><publisher-name>University of Chicago Press</publisher-name><pub-id pub-id-type="other">9780226180458</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van den Heuvel</surname><given-names>L</given-names> </name><name name-style="western"><surname>Knippenberg</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Post</surname><given-names>B</given-names> </name><name name-style="western"><surname>Meinders</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Bloem</surname><given-names>BR</given-names> </name><name name-style="western"><surname>Stiggelbout</surname><given-names>AM</given-names> </name></person-group><article-title>Perspectives of people living with Parkinson&#x2019;s disease on personalized prediction models</article-title><source>Health Expect</source><year>2022</year><month>08</month><volume>25</volume><issue>4</issue><fpage>1580</fpage><lpage>1590</lpage><pub-id pub-id-type="doi">10.1111/hex.13500</pub-id><pub-id pub-id-type="medline">35608072</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brodersen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schwartz</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Heneghan</surname><given-names>C</given-names> </name><name name-style="western"><surname>O&#x2019;Sullivan</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Aronson</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Woloshin</surname><given-names>S</given-names> </name></person-group><article-title>Overdiagnosis: what it is and what it isn&#x2019;t</article-title><source>BMJ Evid Based Med</source><year>2018</year><month>02</month><volume>23</volume><issue>1</issue><fpage>1</fpage><lpage>3</lpage><pub-id pub-id-type="doi">10.1136/ebmed-2017-110886</pub-id><pub-id pub-id-type="medline">29367314</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davies</surname><given-names>B</given-names> 
</name></person-group><article-title>The right not to know and the obligation to know</article-title><source>J Med Ethics</source><year>2020</year><month>05</month><volume>46</volume><issue>5</issue><fpage>300</fpage><lpage>303</lpage><pub-id pub-id-type="doi">10.1136/medethics-2019-106009</pub-id><pub-id pub-id-type="medline">32350031</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karagianis</surname><given-names>JL</given-names> </name></person-group><article-title>Risk disclosure in prodromal Parkinson&#x2019;s disease</article-title><source>Mov Disord</source><year>2022</year><month>06</month><volume>37</volume><issue>6</issue><fpage>1326</fpage><pub-id pub-id-type="doi">10.1002/mds.29038</pub-id><pub-id pub-id-type="medline">35481903</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bibbins-Domingo</surname><given-names>K</given-names> </name><name name-style="western"><surname>Brubaker</surname><given-names>L</given-names> </name><name name-style="western"><surname>Curfman</surname><given-names>G</given-names> </name></person-group><article-title>The 2024 revision to the Declaration of Helsinki: modern ethics for medical research</article-title><source>JAMA</source><year>2025</year><month>01</month><day>7</day><volume>333</volume><issue>1</issue><fpage>30</fpage><lpage>31</lpage><pub-id pub-id-type="doi">10.1001/jama.2024.22530</pub-id><pub-id pub-id-type="medline">39425945</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lekadir</surname><given-names>K</given-names> </name><name name-style="western"><surname>Frangi</surname><given-names>AF</given-names> </name><name 
name-style="western"><surname>Porras</surname><given-names>AR</given-names> </name><etal/></person-group><article-title>FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare</article-title><source>BMJ</source><year>2025</year><month>02</month><day>5</day><volume>388</volume><fpage>e081554</fpage><pub-id pub-id-type="doi">10.1136/bmj-2024-081554</pub-id><pub-id pub-id-type="medline">39909534</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Paccoud</surname><given-names>I</given-names> </name><name name-style="western"><surname>Valero</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Mar&#x00ED;n</surname><given-names>LC</given-names> </name><etal/></person-group><article-title>Patient perspectives on the use of digital medical devices and health data for AI-driven personalised medicine in Parkinson&#x2019;s disease</article-title><source>Front Neurol</source><year>2024</year><volume>15</volume><fpage>1453243</fpage><pub-id pub-id-type="doi">10.3389/fneur.2024.1453243</pub-id><pub-id pub-id-type="medline">39697442</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Johnsson</surname><given-names>C</given-names> </name><name name-style="western"><surname>Malinowsky</surname><given-names>C</given-names> </name><name name-style="western"><surname>Leavy</surname><given-names>B</given-names> </name></person-group><article-title>Everyday technology use among people with Parkinson&#x2019;s disease</article-title><source>Aging Ment Health</source><year>2023</year><volume>27</volume><issue>12</issue><fpage>2430</fpage><lpage>2437</lpage><pub-id pub-id-type="doi">10.1080/13607863.2023.2202628</pub-id><pub-id 
pub-id-type="medline">37139925</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Interview guide.</p><media xlink:href="ai_v5i1e74144_app1.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Focus group introduction slides.</p><media xlink:href="ai_v5i1e74144_app2.pdf" xlink:title="PDF File, 1452 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Focus group guide.</p><media xlink:href="ai_v5i1e74144_app3.docx" xlink:title="DOCX File, 15 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Demonstration of analysis process.</p><media xlink:href="ai_v5i1e74144_app4.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material></app-group></back></article>