﻿<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.0 20120330//EN" "http://jats.nlm.nih.gov/publishing/1.0/JATS-journalpublishing1.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="nlm-ta">Art Int Surg</journal-id>
      <journal-id journal-id-type="publisher-id">AIS</journal-id>
      <journal-title-group>
        <journal-title>Artificial Intelligence Surgery</journal-title>
      </journal-title-group>
      <issn pub-type="epub">2771-0408</issn>
      <publisher>
        <publisher-name>OAE Publishing Inc.</publisher-name>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="doi">10.20517/ais.2026.01</article-id>
      <article-categories>
        <subj-group>
          <subject>Original Article</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Automated CTA-based perforator mapping for DIEP flap planning in breast cancer reconstruction</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author" corresp="yes">
		<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0003-3523-7924</contrib-id>
          <name>
            <surname>Kapila</surname>
            <given-names>Ayush K.</given-names>
          </name>
          <xref ref-type="aff" rid="I1">
            <sup>1</sup>
          </xref>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
          <xref ref-type="corresp" rid="cor1" />
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Lamtenzan Marcos</surname>
            <given-names>Diego</given-names>
          </name>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
          <xref ref-type="aff" rid="I3">
            <sup>3</sup>
          </xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Ceranka</surname>
            <given-names>Jakub</given-names>
          </name>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
          <xref ref-type="aff" rid="I3">
            <sup>3</sup>
          </xref>
          <xref ref-type="aff" rid="I4">
            <sup>4</sup>
          </xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Brussaard</surname>
            <given-names>Carola</given-names>
          </name>
          <xref ref-type="aff" rid="I5">
            <sup>5</sup>
          </xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Boonen</surname>
            <given-names>Pieter Thomas</given-names>
          </name>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
          <xref ref-type="aff" rid="I3">
            <sup>3</sup>
          </xref>
          <xref ref-type="aff" rid="I4">
            <sup>4</sup>
          </xref>
          <xref ref-type="aff" rid="I5">
            <sup>5</sup>
          </xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Ledegen</surname>
            <given-names>Laure</given-names>
          </name>
          <xref ref-type="aff" rid="I1">
            <sup>1</sup>
          </xref>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Vandemeulebroucke</surname>
            <given-names>Jef</given-names>
          </name>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
          <xref ref-type="aff" rid="I3">
            <sup>3</sup>
          </xref>
          <xref ref-type="aff" rid="I4">
            <sup>4</sup>
          </xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Hamdi</surname>
            <given-names>Moustapha</given-names>
          </name>
          <xref ref-type="aff" rid="I1">
            <sup>1</sup>
          </xref>
          <xref ref-type="aff" rid="I2">
            <sup>2</sup>
          </xref>
        </contrib>
      </contrib-group>
      <aff id="I1">
        <sup>1</sup>Department of Plastic, Reconstructive and Aesthetic Surgery, Brussels University Hospital (UZ Brussel), Brussels 1090, Belgium.</aff>
      <aff id="I2">
        <sup>2</sup>Vrije Universiteit Brussel (VUB), Brussels 1050, Belgium.</aff>
      <aff id="I3">
        <sup>3</sup>Department of Electronics and Informatics, Vrije Universiteit Brussel (ETRO.RDI), Brussels 1050, Belgium.</aff>
      <aff id="I4">
        <sup>4</sup>imec (Interuniversity Micro-Electronic Center), Leuven 3001, Belgium.</aff>
      <aff id="I5">
        <sup>5</sup>Department of Radiology, Brussels University Hospital (UZ Brussel), Brussels 1090, Belgium.</aff>
      <author-notes>
        <corresp id="cor1">Correspondence to: Dr. Ayush K. Kapila, Department of Plastic, Reconstructive and Aesthetic Surgery, Brussels University Hospital (UZ Brussel), Brussels 1090, Belgium. E-mail: <email>ayush.kapila@uzbrussel.be</email></corresp>
        <fn fn-type="other">
          <p>
            <bold>Received:</bold> 4 Jan 2026 | <bold>First Decision:</bold> 13 Mar 2026 | <bold>Revised:</bold> 15 Mar 2026 | <bold>Accepted:</bold> 21 Apr 2026 | <bold>Published:</bold> 14 May 2026</p>
        </fn>
        <fn fn-type="other">
          <p>
            <bold>Academic Editor:</bold> Thomas Schnelldorfer | <bold>Copy Editor:</bold> Xing-Yue Zhang | <bold>Production Editor:</bold> Xing-Yue Zhang</p>
        </fn>
      </author-notes>
      <pub-date pub-type="ppub">
        <year>2026</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>14</day>
        <month>5</month>
        <year>2026</year>
      </pub-date>
      <volume>6</volume>
	  <issue>2</issue>
      <fpage>255</fpage>
	  <lpage>267</lpage>
      <permissions>
        <copyright-statement>© The Author(s) 2026.</copyright-statement>
        <license xlink:href="https://creativecommons.org/licenses/by/4.0/">
          <license-p>© The Author(s) 2026. <bold>Open Access</bold> This article is licensed under a Creative Commons Attribution 4.0 International License (<uri xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</uri>), which permits unrestricted use, sharing, adaptation, distribution and reproduction in any medium or format, for any purpose, even commercially, as long as you give appropriate credit to the original author(s) and the source, provide a link to the Creative Commons license, and indicate if changes were made.</license-p>
        </license>
      </permissions>
      <abstract>
        <p>
          <bold>Aim:</bold> Deep inferior epigastric perforator (DIEP) flap breast reconstruction is considered the gold standard for autologous reconstruction. Preoperative perforator mapping using computed tomography angiography (CTA) remains labor-intensive, time-consuming, and subject to interobserver variability. Automated computer-aided detection (CAD) systems may help standardize and accelerate this process. This study aimed to develop and evaluate a proof-of-concept automated CAD pipeline for CTA-based perforator mapping in DIEP flap planning.</p>
        <p>
          <bold>Methods:</bold> A retrospective dataset of 504 CTA scans acquired for DIEP flap planning was analyzed. Fifty-five scans were manually annotated for perforator segmentation, and 100 scans were annotated for umbilicus landmark detection. A dual maximum intensity projection (MIP) depth-aware annotation workflow was introduced to standardize vessel labeling. The automated pipeline combined anatomical region of interest (ROI) localization with deep-learning-based vessel segmentation using a 3D Swin UNETR (Swin Transformer-based) model. Performance was evaluated using the Dice similarity coefficient (Dice), centerline Dice, recall, and the 95th percentile Hausdorff distance (HD95).</p>
        <p>
          <bold>Results:</bold> Depth-aware annotation reduced labeling time by approximately 60%-70%. ROI localization was successful in all scans (28 ± 5 s), and umbilicus localization achieved an error of approximately 2 mm. The Swin UNETR model achieved a median Dice score of 0.58, outperforming Attention U-Net. Continuity-aware training improved Dice to 0.60 and recall to 0.58, while multiclass segmentation improved performance in adipose tissue.</p>
        <p>
          <bold>Conclusion:</bold> This study demonstrates the feasibility of an automated CAD pipeline integrating standardized annotation, anatomical ROI localization, and deep-learning-based vessel segmentation for DIEP flap planning. This represents an important step toward faster, more reproducible, and clinically scalable CTA-based perforator mapping.</p>
      </abstract>
      <kwd-group>
        <kwd>Breast cancer reconstruction</kwd>
        <kwd>DIEP flap surgery</kwd>
        <kwd>computed tomographic angiography</kwd>
        <kwd>computer-aided detection</kwd>
        <kwd>deep learning</kwd>
        <kwd>perforator segmentation</kwd>
        <kwd>artificial intelligence</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec id="sec1">
      <title>INTRODUCTION</title>
      <p>For patients undergoing mastectomy, particularly for breast cancer, autologous reconstruction offers superior long-term aesthetic and psychological outcomes, making efficient and precise planning a critical component of comprehensive cancer care<sup>[<xref ref-type="bibr" rid="B1">1</xref>-<xref ref-type="bibr" rid="B4">4</xref>]</sup>. Accurate preoperative mapping of perforator vessels has a significant impact on outcomes of deep inferior epigastric perforator (DIEP) flap breast reconstruction, which is recognized as the gold standard in autologous breast cancer reconstruction<sup>[<xref ref-type="bibr" rid="B4">4</xref>,<xref ref-type="bibr" rid="B5">5</xref>]</sup>. Although computed tomography angiography (CTA) provides detailed vessel visualization, traditional manual review remains labor-intensive, time-consuming, and prone to significant interobserver variability<sup>[<xref ref-type="bibr" rid="B6">6</xref>]</sup>.</p>
      <p>Radiologists must meticulously review high-resolution datasets, create three-dimensional (3D) reconstructions, and manually measure each perforator’s diameter, intramuscular tunnel length, and skin-exit coordinates. This process typically consumes 30-90 min per patient and demands specialized radiologic and microsurgical expertise, which may limit the availability of this approach to high-volume specialized centers<sup>[<xref ref-type="bibr" rid="B4">4</xref>,<xref ref-type="bibr" rid="B6">6</xref>]</sup>. A previous study suggests that radiologists’ top-ranked perforators align with surgeons’ intraoperative choices only about 67.3% of the time, reflecting subjective differences in how experts weigh various criteria<sup>[<xref ref-type="bibr" rid="B7">7</xref>]</sup>. The absence of standardized protocols for measurements or anatomical grids further compounds these inconsistencies, hindering multicenter collaboration and the establishment of best practices.</p>
      <p>These challenges underscore the clinical need for a fully automated algorithm for perforator selection. Such a tool would not only reduce planning time and eliminate observer bias but also enable accurate CTA-assisted DIEP planning to be performed consistently across reconstructive centers, thereby democratizing access to this superior reconstructive option<sup>[<xref ref-type="bibr" rid="B8">8</xref>]</sup>. This paper details the development of an automated computer-aided detection (CAD) pipeline designed to address these limitations by enhancing efficiency and reproducibility, and ultimately, optimizing the use of artificial intelligence (AI) to support surgeons and improve patient care and experience<sup>[<xref ref-type="bibr" rid="B4">4</xref>,<xref ref-type="bibr" rid="B9">9</xref>-<xref ref-type="bibr" rid="B11">11</xref>]</sup>.</p>
      <p>Automating this process may improve reproducibility, accuracy, and efficiency, which are critically needed for wider clinical adoption in breast cancer surgery and beyond<sup>[<xref ref-type="bibr" rid="B10">10</xref>,<xref ref-type="bibr" rid="B11">11</xref>]</sup>. Although vessel segmentation methods have previously been described, some are semi-automated<sup>[<xref ref-type="bibr" rid="B8">8</xref>,<xref ref-type="bibr" rid="B12">12</xref>]</sup>, whereas others rely on synthetic data<sup>[<xref ref-type="bibr" rid="B13">13</xref>,<xref ref-type="bibr" rid="B14">14</xref>]</sup>. This study develops a high-precision segmentation model for the deep inferior epigastric artery system (DIEA), a critical first step toward fully automated perforator planning, and introduces a proof-of-concept fully automated CAD pipeline for DIEP flap surgery based on real clinical data. This study aims to define the necessary building blocks and outline future work to identify the principal DIEA perforators. In this study, the principal DIEA perforator refers to the perforator identified on CTA as the most favorable based on radiological characteristics (such as perforator diameter, course, and location), rather than on intraoperative confirmation. The study focuses on image-based identification, and intraoperative validation was beyond the scope of the current proof-of-concept work; however, it forms part of future work. This work aims to improve the efficiency, reproducibility, and clinical applicability of AI-driven preoperative planning, thereby enhancing patient care and patient experience.</p>
    </sec>
    <sec id="sec2">
      <title>METHODS</title>
      <p>This research was conducted as an ongoing collaborative effort between the Department of Plastic, Reconstructive, and Aesthetic Surgery at Brussels University Hospital (UZ Brussel), the Department of Radiology at Brussels University Hospital (UZ Brussel), and the Department of Electronics and Informatics, Vrije Universiteit Brussel (ETRO.RDI). These institutes have experience in developing CAD systems for various clinical applications<sup>[<xref ref-type="bibr" rid="B15">15</xref>-<xref ref-type="bibr" rid="B17">17</xref>]</sup>. All procedures were conducted in accordance with approved ethical protocols and anonymization standards (Project ID 23426_AI DIEP; BUN number: 1432023000307; Medical Ethics Committee of UZ Brussel/VUB). The workflow is shown in <xref ref-type="fig" rid="fig1">Figure 1</xref>, and the steps are described below.</p>
      <fig id="fig1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>Workflow overview of automatic vessel segmentation. Step 1: Dual-view maximum intensity projection (MIP) annotation is performed using axial and coronal MIP planes to reconstruct the perforator vessel paths with improved spatial consistency. Step 2: An anatomically aware region of interest (ROI) is cropped and individually adapted for each patient to precisely target the expected anatomical location of the perforators. Step 3: The vascular tree within the defined ROI is automatically segmented using a deep-learning model.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="ais6001.fig.1.jpg" />
      </fig>
      <sec id="sec2-1">
        <title>Imaging data and expert annotation</title>
        <p>The study used a curated dataset of 504 anonymized CTA scans retrospectively collected at UZ Brussel (Brussels, Belgium) between January 2014 and December 2023 from patients evaluated for DIEP flap breast reconstruction. The inclusion criterion was that a CTA was performed for preoperative DIEP flap planning. CTA scans acquired for other clinical indications were excluded. Scans were acquired with standardized parameters using a GE Medical Systems Discovery CT750 HD scanner (GE Healthcare, Chicago, Illinois, United States) to ensure consistency across the dataset. All patients were scanned using the same DIEP CTA protocol as outlined by Karunanithy <italic>et al.</italic><sup>[<xref ref-type="bibr" rid="B18">18</xref>]</sup>. From this dataset, 55 patients were randomly selected for manual volumetric annotation of the DIEP vascular anatomy. A train-test split strategy was used. Five scans were used for model hyperparameter tuning, and the remaining 50 were used for 5-fold cross-validation.</p>
        <p>A separate cohort of 100 patients was randomly selected for manual annotation of the umbilical landmark location (x, y, z coordinates).</p>
        <p>Each CTA volume underwent preprocessing, including conversion from digital imaging and communications in medicine (DICOM) to neuroimaging informatics technology initiative (NIfTI) format, intensity normalization and clipping to the range of 0-1, and resampling to an isotropic voxel size of 1 × 1 × 1 mm. Statistical analyses were performed using Python 3.11 (Python Software Foundation, Wilmington, Delaware, United States).</p>
      </sec>
      <sec id="sec2-2">
        <title>Depth-aware back-projection of MIP images</title>
        <p>To overcome the challenges of annotating narrow, low-contrast perforator vessels in full-volume CTA, a novel dual-maximum intensity projection (MIP) annotation workflow was implemented [<xref ref-type="fig" rid="fig2">Figure 2</xref>]. The first major step was to back-project MIP labels into the CTA image domain. This approach mirrors established clinical practice, in which radiologists rely on slab-based MIPs for visualizing intricate vascular networks. Annotators first independently reviewed and annotated 10-mm-thick axial and coronal MIP slabs, which enhanced vessel conspicuity and continuity. A depth-map algorithm was then used to recover the precise 3D spatial coordinates lost during MIP creation. Annotations were performed in 3D Slicer (The Slicer Community)<sup>[<xref ref-type="bibr" rid="B19">19</xref>]</sup>, and these MIP-enhanced annotations were back-projected into the original 3D CTA volume.</p>
        <fig id="fig2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Dual-MIP annotation workflow, starting from the raw CTA: Step 1: Generate 10-mm-thick axial and coronal MIP slabs. Step 2: Annotate vessel branches on both MIPs in 3D Slicer. Step 3: Back-project these 3D labels into the CTA volume via a depth-map algorithm to obtain the final refined annotation. MIP: Maximum intensity projection; CTA: computed tomography angiography; 3D: three-dimensional; ROI: region of interest.</p>
          </caption>
          <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="ais6001.fig.2.jpg" />
        </fig>
      </sec>
      <sec id="sec2-3">
        <title>Automated region of interest localization</title>
        <p>To optimize computational efficiency and limit the analysis to clinically relevant anatomy, an automated region of interest (ROI) localization module was developed [<xref ref-type="fig" rid="fig3">Figure 3</xref>]. Processing full CTA volumes is both time- and memory-intensive because large portions of the volume are unrelated to perforator mapping.</p>
        <fig id="fig3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Schematic overview of the anatomy-aware ROI cropping methodology. ROI: Region of interest; CTA: computed tomography angiography.</p>
          </caption>
          <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="ais6001.fig.3.jpg" />
        </fig>
        <p>The ROI localization was implemented using a two-step approach. First, atlas-free landmark-guided segmentation was performed using pretrained models from TotalSegmentator<sup>[<xref ref-type="bibr" rid="B20">20</xref>]</sup>. The volume of interest was constrained using anatomically identifiable landmarks: the left and right anterior superior iliac spines (ASIS) defined the lateral boundaries, the pubic tubercle defined the caudal limit, and a point 5 cm inferior to the xiphisternum defined the cranial limit. The anterior boundary was set as the most anterior skin surface, with an additional 2-cm margin, whereas the posterior boundary corresponded to the plane of the bilateral ASIS. Second, the umbilicus was localized using a heatmap regression approach based on a U-Net architecture (<italic>n</italic> = 100). To detect the location of the umbilicus, a heatmap-regression 3D U-Net method was introduced that predicts the heatmap peak at the umbilicus location. The method used binary cross-entropy loss and a 256 × 256 × 256 voxel input patch for training. The aim was to establish the umbilicus as the anatomical reference point for future automated quantitative reporting of perforator location.</p>
      </sec>
      <sec id="sec2-4">
        <title>Automatic vessel segmentation</title>
        <p>Within the localized ROI, a deep learning model was trained for perforator vessel segmentation. The 3D Swin UNETR (Swin Transformer-based) architecture and the Attention U-Net architectures were evaluated for their ability to capture long-range dependencies and global context, which are crucial for segmenting elongated and tortuous vascular networks<sup>[<xref ref-type="bibr" rid="B21">21</xref>,<xref ref-type="bibr" rid="B22">22</xref>]</sup>. These models process 3D image patches and learn to identify and delineate perforator vessels.</p>
        <p>The training strategy for the deep learning model involved:</p>
        <p>1. Patch sampling: Patches of 96 × 96 × 96 voxels were extracted to ensure a balanced representation of vessel and non-vessel regions, enabling the network to learn contextual features from both vessel-rich and vessel-poor areas.</p>
        <p>2. Loss function: A composite loss function combining Dice similarity coefficient (Dice) loss (for overall overlap) and cross-entropy loss (for pixel-level accuracy) was used, with a weighting that prioritized Dice loss to encourage correct overall region shape and overlap.</p>
        <p>3. Data augmentation: A comprehensive set of 3D data augmentations (e.g., random flips, rotations, scaling, and noise injection) was applied to simulate inter-scanner and inter-patient variability, enhancing the model’s generalizability.</p>
        <p>4. Optimization: The AdamW optimizer<sup>[<xref ref-type="bibr" rid="B23">23</xref>]</sup> was used with a learning rate scheduler combining linear warmup and cosine annealing to stabilize and fine-tune training.</p>
        <p>Post-processing steps were systematically applied to the raw model outputs to refine predictions and enhance topological consistency, both of which are critical for clinical interpretation. These steps included thresholding, removing small, isolated components, extracting centerlines, and bridging minor discontinuities, thereby ensuring continuous and anatomically plausible vessel segmentations.</p>
        <p>The primary development environment was built using an NVIDIA Ampere GPU with 40 GB of memory. Software development and experimentation were performed in Python using PyTorch (Meta AI, New York City, United States) and the Medical Open Network for AI (MONAI) (NVIDIA, National Institutes of Health, and King’s College London) frameworks<sup>[<xref ref-type="bibr" rid="B24">24</xref>]</sup>.</p>
      </sec>
      <sec id="sec2-5">
        <title>Experimental enhancements</title>
        <p>To address limitations of the baseline model, particularly incomplete segmentation of small vessels and discontinuities in fine branching structures, two targeted enhancements were evaluated for sparse perforator networks.</p>
        <p>1. Continuity-Aware loss function: To improve sensitivity to thin, tubular structures,</p>
        <p>the baseline loss function was modified by adding the centerline Dice (clDice) loss component.</p>
        <p>2. Multiclass segmentation: To bias the network toward underrepresented, thin perforators primarily located in the adipose region, a fat mask was used to first create multiclass ground-truth annotations and to increase patch sampling within the fat regions.</p>
      </sec>
      <sec id="sec2-6">
        <title>Outcome measures - validation metrics</title>
        <p>To assess how well the segmentation model performed in identifying and outlining the perforator vessels, we used a combination of standard and anatomy-specific evaluation metrics<sup>[<xref ref-type="bibr" rid="B25">25</xref>]</sup> selected for their clinical relevance in flap planning:</p>
        <p>• Dice: This metric measures how closely the segmented vessels match the reference anatomy in terms of overall shape and volume. A score of 1 indicates perfect overlap, whereas lower scores indicate discrepancies in vessel detection.</p>
        <p>• clDice: This metric evaluates how well the model captures the course and branching pattern of the vessels by comparing the predicted centerlines or “skeletons” with the reference centerlines. It is especially useful for assessing whether fine branches are continuous and correctly traced, which is important for surgical planning, where vessel continuity matters.</p>
        <p>• Recall: This metric indicates how many reference vessels were correctly identified by the model. Higher recall indicates fewer missed perforators, which is particularly important for small or deep branches in adipose tissue.</p>
        <p>• 95th Percentile Hausdorff Distance (HD95): This metric measures the worst-case spatial error between the segmented vessel borders and the true anatomy while reducing the influence of extreme outliers. It provides insight into how precisely the vessel edges are mapped, which can affect flap design or intraoperative navigation.</p>
        <p>Together, these outcome measures provide a balanced assessment of the model’s performance, including whether it accurately captures the key vessels, preserves their connectivity, and achieves a level of detail suitable for microsurgical decision-making. Statistical analysis was performed using Wilcoxon signed-rank tests with Bonferroni correction, with results considered significant at <italic>P</italic> &lt; 0.05.</p>
      </sec>
    </sec>
    <sec id="sec3">
      <title>RESULTS</title>
	  <sec id="sec3-1">
      <title>Annotation strategy</title>
      <p>The depth-aware back-projection of MIP images substantially reduced annotation time by 60%-70% (from approximately 4 h to approximately 90 min per patient) and significantly improved vessel continuity while reducing the number of missed perforators. Additionally, this approach better aligned the annotation process with current clinical practice.</p>
	  </sec>
      <sec id="sec3-2">
        <title>Automated ROI localization</title>
        <p>The atlas-free, segmentation-based ROI cropping strategy demonstrated excellent performance, successfully generating skeletal and sternum segmentations and automatically extracting all target anatomical landmarks in 100% of the evaluated scans. The process was robust, with a mean inference time of 28 ± 5 s per volume, supporting its integration into a rapid clinical workflow.</p>
        <p>The U-Net + heatmap-based umbilicus detector achieved a mean target registration error (TRE) of approximately 2 mm in the validation cohort, enhancing the stability and reproducibility of ROI definition, particularly in cases with variable abdominal morphology or surgical scarring.</p>
      </sec>
      <sec id="sec3-3">
        <title>Baseline segmentation performance</title>
        <p>The baseline segmentation model, Swin UNETR, demonstrated robust performance for identifying the main perforator trunks and proximal secondary branches. It achieved a median global Dice coefficient of 0.58 [0.54, 0.63], significantly outperforming Attention U-Net (Dice 0.55 [0.49, 0.60], <italic>P</italic> &lt; 0.05). Recall was also significantly higher for Swin UNETR than for Attention U-Net (0.54 [0.47, 0.60] versus 0.43 [0.35, 0.50], <italic>P</italic> &lt; 0.05), reflecting superior sensitivity to actual vessel voxels. Boundary delineation was more consistent with a significantly lower HD95 of 20.0 mm [15.0, 23.8] versus 33.5 mm [25.0, 36.7] (<italic>P</italic> &lt; 0.05), indicating closer alignment with the ground truth. Topological continuity, measured by clDice, was similar between the two models (0.41 for Swin UNETR versus 0.40 for Attention U-Net).</p>
        <p>Visually, Swin UNETR reliably recovered the principal perforators and main branches but consistently missed smaller-caliber branches and produced small gaps at vessel bifurcations, particularly in deeper adipose tissue. In simpler anatomical cases, such as sparse perforator networks, performance was notably better, with Dice values reaching up to 0.72.</p>
        <p>Based on this superior performance, Swin UNETR was adopted as the baseline model for subsequent experiments.</p>
      </sec>
      <sec id="sec3-4">
        <title>Experimental enhancements</title>
        <p>Incorporating a continuity-aware component, clDice, into the standard Dice and cross-entropy loss functions significantly improved model performance. Values are presented to two decimal places, with 95% confidence intervals (CIs) shown in brackets.</p>
        <p>• Global Dice improved to 0.60 [0.54, 0.65] (<italic>P</italic> ≤ 0.05), compared to a baseline (experiment-specific control) Dice of 0.58 [0.54, 0.63].</p>
        <p>• Recall increased significantly to 0.58 [0.51, 0.64] (<italic>P</italic> ≤ 0.05), reflecting improved sensitivity and fewer missed vessels.</p>
        <p>• HD95 decreased notably to 17.5 mm [11.1, 23.4], highlighting tighter and more accurate vessel boundary delineation.</p>
        <p>• clDice remained stable at 0.41 [0.38, 0.46], indicating maintained or slightly improved vessel continuity.</p>
        <p>• This enhancement produced smoother and more anatomically coherent vessel segmentations, reducing the fragmentation of vessel branches critical for preoperative perforator mapping.</p>
        <p>Multiclass segmentation also significantly improved segmentation outcomes by leveraging a pretrained fat and muscle segmentation model to guide region-aware learning<sup>[<xref ref-type="bibr" rid="B26">26</xref>]</sup>:</p>
        <p>• Global Dice increased to 0.60 [0.56, 0.63], <italic>P</italic> ≤ 0.05 from the experiment-specific control baseline of 0.58, matching the continuity-aware method’s overall gain.</p>
        <p>• Notably, Dice in the challenging adipose compartment increased to 0.52 [0.47, 0.57] compared with the baseline value of 0.49 [0.40, 0.55], representing a clinically meaningful improvement in the region most relevant to DIEP flap harvesting.</p>
        <p>• HD95 decreased slightly to 19.15 mm [12.2, 24.7], indicating better boundary precision in adipose areas.</p>
        <p>• clDice showed moderate improvement to approximately 0.41 [0.35, 0.44] (<italic>P</italic> ≤ 0.05), suggesting better continuity, although distal vessel tips remained partially under-segmented.</p>
        <p>• From a clinical perspective, this method enhanced visibility and segmentation quality precisely in the critical fat regions used for surgical planning, without compromising global segmentation quality.</p>
        <p>
          <xref ref-type="fig" rid="fig4">Figure 4</xref> compares two representative cases: complex perforator anatomy (Example 1) and sparse perforators (Example 2). For each case, the ground truth, baseline, and both enhanced model outputs are shown to illustrate differences in segmentation predictions.</p>
        <fig id="fig4" position="float" width="550">
          <label>Figure 4</label>
          <caption>
            <p>Qualitative comparison of two representative cases: complex anatomy (Example 1) and sparse anatomy (Example 2), showing (top to bottom) ground truth, baseline Swin UNETR, and two enhanced models. Black boxes report the median Dice scores for each method. Enhanced outputs exhibit markedly improved branch continuity and reduced false positives. Swin UNETR: Swin UNEt TRansformers.</p>
          </caption>
          <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="ais6001.fig.4.jpg" />
        </fig>
      </sec>
    </sec>
    <sec id="sec4">
      <title>DISCUSSION</title>
      <p>This study demonstrates that a deep-learning-based segmentation pipeline (Swin UNETR) accurately identifies principal DIEA perforators, achieving robust segmentation performance (median Dice: 0.58). Although baseline results effectively delineated dominant perforator trunks, segmentation of smaller-caliber vessels, particularly in deep adipose tissue, remained challenging and resulted in fragmented vessel branches and discontinuities that could affect surgical planning. Targeted enhancements addressing vessel continuity (continuity-aware loss functions) and adipose-tissue specificity (multiclass segmentation) significantly mitigated these limitations. Continuity-aware training notably improved global Dice (0.60, <italic>P</italic> &lt; 0.05), recall (0.58; <italic>P</italic> &lt; 0.05), and boundary accuracy (HD95: 17.5 mm; <italic>P</italic> &lt; 0.05), whereas multiclass segmentation significantly enhanced adipose-specific accuracy (Dice improved to 0.52; <italic>P</italic> &lt; 0.05). These results underscore the clinical importance of specialized training strategies, particularly when segmenting complex adipose anatomy, enhancing the practical utility of automated perforator mapping for DIEP flap breast reconstruction.</p>
      <p>Although further improvements are still needed, the development of this AI-based CAD pipeline represents a crucial step toward streamlining preoperative DIEP flap planning. For clinicians involved in breast cancer care, the current manual workflow consumes valuable radiologist and surgeon time while introducing variability that can affect surgical outcomes. Automated ROI localization, with its 100% success rate and sub-30-second processing time, directly addresses the need for efficient and consistent anatomical targeting, thereby freeing experts’ time for critical decision-making.</p>
      <p>The core innovation lies in deep-learning-based vessel segmentation. Although the baseline Swin UNETR model already demonstrated robust detection of principal perforators, the persistent challenge of segmenting fine branches in adipose tissue remained a key focus<sup>[<xref ref-type="bibr" rid="B21">21</xref>]</sup>. This challenge stems from factors such as annotation noise, partial volume effects (where vessels are smaller than a single voxel), irregular vessel geometry, and class imbalance in the training data.</p>
      <p>The success of continuity-aware loss functions and multiclass segmentation may be relevant for surgical decision-making across various disciplines. The combined continuity-aware loss directly addresses the clinical need for continuous, unfragmented vessel paths. By explicitly penalizing discontinuities, this loss function helps the model produce segmentations that are both volumetrically accurate and topologically sound. Similarly, the multiclass segmentation approach, by specifically improving performance in adipose tissue, addresses a challenging area where perforators are often low-contrast and difficult to delineate, thereby enhancing the reliability across diverse patient anatomies. This has broader implications for surgical procedures requiring precise navigation through complex soft tissue.</p>
      <p>These advancements collectively contribute to a system that may reduce planning time, enhance reproducibility, and minimize operator dependence, thereby broadening access to DIEP flap reconstruction<sup>[<xref ref-type="bibr" rid="B10">10</xref>,<xref ref-type="bibr" rid="B11">11</xref>]</sup>. The pipeline’s ability to reliably recover both primary trunks and secondary branches, corroborated by quantitative metrics and qualitative review, demonstrates its potential to support downstream applications such as automated flap design and computational hemodynamics.</p>
      <p>However, it is crucial to acknowledge several limitations. The dataset, while expertly annotated, has sparse and uneven annotation coverage of perforators, particularly for fine perforators in adipose tissue, which still limits the achievable recall. Nevertheless, DIEP flap harvest primarily relies on moderate- to large-caliber perforators, thereby reducing the practical importance of capturing extremely fine vessels. Furthermore, the evaluations were conducted offline on a single-center dataset; therefore, further prospective clinical validation across multiple centers and diverse patient populations is essential to confirm real-world utility and generalizability.</p>
      <p>A clinical study is currently underway to assess this fully automated CAD pipeline against radiological reports, and the findings will be disseminated. The project compares the coordinates generated by the CAD pipeline with those reported by radiologists to assess accuracy. Ethical approval has been obtained, and the project is ongoing (Project ID: 24473_AI DIEP; BUN: 1432024000326; Local Ethics Committee UZB-VUB). Future work will also focus on integrating quantitative perforator metrics (e.g., vessel diameter and intramuscular course length) to advance the system from a detection aid to a comprehensive decision-support module.</p>
      <p>In this work, we established a high-precision segmentation model for the deep inferior epigastric artery system, demonstrating a proof-of-concept for the first fully automated CAD pipeline for DIEP flap surgery based on real clinical data and AI-driven analysis. This includes a reproducible annotation protocol for sub-millimetric abdominal perforators, a patient-specific ROI localization strategy, and robust segmentation of principal and secondary perforators using a 3D Swin UNETR model. Expert radiologist validation confirms the clinical relevance of the identified vessels. Although the finest subcutaneous branches remain challenging, the overall performance validates the feasibility of CAD-assisted DIEP planning.</p>
      <p>These results represent an important step toward a deployable CAD system for breast cancer reconstruction and microsurgery more broadly. The core components demonstrate the potential to streamline preoperative workflows, enhance reproducibility, and reduce operator dependence. The immediate next milestones involve clinical validation to quantify reductions in planning time and to compare AI-suggested perforators with those selected by radiologists in real-world scenarios. These studies have received ethical approval and are currently ongoing. Further enhancements will include integrating quantitative perforator metrics to transform the system into a comprehensive decision-support tool and generalizing the framework to other perforator-based flaps. An automated CAD-based system may eventually enable customizable perforator ranking, enabling surgeons to prioritize the variables most relevant to their operative strategy. In other words, the system could move beyond purely radiological criteria and incorporate surgeon-specific selection parameters, potentially improving concordance between imaging-based recommendations and intraoperative decision-making. Ultimately, embedding this AI-driven workflow into clinical planning platforms may pave the way for personalized and reproducible reconstructive microsurgery.</p>
    </sec>
  </body>
  <back>
    <sec>
      <title>DECLARATIONS</title>
      <sec>
        <title>Authors’ contributions</title>
        <p>Conceived the study, defined the clinical framework, supervised the CAD pipeline development, and drafted the manuscript: Kapila AK</p>
        <p>Developed the CAD pipeline and conducted the methodological experiments: Lamtenzan Marcos D</p>
        <p>Contributed to the technical development, implementation, and validation of the deep learning methods: Ceranka J</p>
        <p>Supervised and validated the expert perforator annotations and provided radiological expertise: Brussaard C</p>
        <p>Contributed to the development and validation of the vessel segmentation methodology: Boonen PP</p>
        <p>Contributed to clinical data preparation and coordination of ethical approval and administrative processes: Ledegen L</p>
        <p>Supervised the technical aspects of the project and provided expertise in medical image analysis: Vandemeulebroucke J</p>
        <p>Provided overall clinical supervision and contributed to the study design and interpretation: Hamdi M</p>
        <p>All authors contributed to manuscript revision and approved the final version.</p>
      </sec>
      <sec>
        <title>Availability of data and materials</title>
        <p>The data are securely stored and can be made available upon reasonable request.</p>
      </sec>
      <sec>
        <title>AI and AI-assisted tools statement</title>
        <p>Not applicable.</p>
      </sec>
      <sec>
        <title>Financial support and sponsorship</title>
        <p>None.</p>
      </sec>
      <sec>
        <title>Conflicts of interest</title>
        <p>Ceranka J, Boonen PP, and Vandemeulebroucke J are affiliated with imec (Interuniversity Micro-Electronic Center, Leuven, Belgium). All other authors declare no conflicts of interest.</p>
      </sec>
      <sec>
        <title>Ethical approval and consent to participate</title>
        <p>Ethical approval was obtained from the institutional review board, as outlined in the manuscript. All procedures were conducted in accordance with approved ethical clearances and anonymization protocols (Project ID 23426_AI DIEP, BUN number: 1432023000307, The Medical Ethics Committee of UZ Brussel/VUB). The dataset consists of fully anonymized CTA scans used exclusively for software development purposes. In accordance with institutional review board (IRB) policy, a waiver of informed consent was granted.</p>
      </sec>
      <sec>
        <title>Consent for publication</title>
        <p>Not applicable.</p>
      </sec>
      <sec>
        <title>Copyright</title>
        <p>© The Authors 2026.</p>
      </sec>
    </sec>
    <ref-list>
      <ref id="B1">
        <label>1</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Pirro</surname>
              <given-names>O</given-names>
            </name>
            <name>
              <surname>Mestak</surname>
              <given-names>O</given-names>
            </name>
            <name>
              <surname>Vindigni</surname>
              <given-names>V</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Comparison of patient-reported outcomes after implant versus autologous tissue breast reconstruction using the BREAST-Q</article-title>
          <source>Plast Reconstr Surg Glob Open</source>
          <year>2017</year>
          <volume>5</volume>
          <fpage>e1217</fpage>
          <pub-id pub-id-type="doi">10.1097/GOX.0000000000001217</pub-id>
          <pub-id pub-id-type="pmid">28203513</pub-id>
          <pub-id pub-id-type="pmcid">PMC5293311</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B2">
        <label>2</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Toyserkani</surname>
              <given-names>NM</given-names>
            </name>
            <name>
              <surname>Jørgensen</surname>
              <given-names>MG</given-names>
            </name>
            <name>
              <surname>Tabatabaeifar</surname>
              <given-names>S</given-names>
            </name>
            <name>
              <surname>Damsgaard</surname>
              <given-names>T</given-names>
            </name>
            <name>
              <surname>Sørensen</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>Autologous versus implant-based breast reconstruction: a systematic review and meta-analysis of Breast-Q patient-reported outcomes</article-title>
          <source>J Plast Reconstr Aesthet Surg</source>
          <year>2020</year>
          <volume>73</volume>
          <fpage>278</fpage>
          <lpage>85</lpage>
          <pub-id pub-id-type="doi">10.1016/j.bjps.2019.09.040</pub-id>
          <pub-id pub-id-type="pmid">31711862</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B3">
        <label>3</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Eltahir</surname>
              <given-names>Y</given-names>
            </name>
            <name>
              <surname>Krabbe-Timmerman</surname>
              <given-names>IS</given-names>
            </name>
            <name>
              <surname>Sadok</surname>
              <given-names>N</given-names>
            </name>
            <name>
              <surname>Werker</surname>
              <given-names>PMN</given-names>
            </name>
            <name>
              <surname>de Bock</surname>
              <given-names>GH</given-names>
            </name>
          </person-group>
          <article-title>Outcome of quality of life for women undergoing autologous versus alloplastic breast reconstruction following mastectomy: a systematic review and meta-analysis</article-title>
          <source>Plast Reconstr Surg</source>
          <year>2020</year>
          <volume>145</volume>
          <fpage>1109</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1097/PRS.0000000000006720</pub-id>
          <pub-id pub-id-type="pmid">32332522</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B4">
        <label>4</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Hamdi</surname>
              <given-names>M</given-names>
            </name>
            <name>
              <surname>Kapila</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>Waked</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Current status of autologous breast reconstruction in Europe: how to reduce donor site morbidity</article-title>
          <source>Gland Surg</source>
          <year>2023</year>
          <volume>12</volume>
          <fpage>1760</fpage>
          <pub-id pub-id-type="doi">10.21037/gs-23-288</pub-id>
          <pub-id pub-id-type="pmid">38229849</pub-id>
          <pub-id pub-id-type="pmcid">PMC10788572</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B5">
        <label>5</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kapila</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>Iyer</surname>
              <given-names>H</given-names>
            </name>
            <name>
              <surname>Mohanna</surname>
              <given-names>P</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Enhancing abdominal donor site aesthetics: the double diamond drainless closure with barbed progressive tension sutures</article-title>
          <source>J Plast Reconstr Aesthet Surg</source>
          <year>2024</year>
          <volume>96</volume>
          <fpage>53</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.1016/j.bjps.2024.07.020</pub-id>
          <pub-id pub-id-type="pmid">39053033</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B6">
        <label>6</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kapila</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>Kempny</surname>
              <given-names>T</given-names>
            </name>
            <name>
              <surname>Knoz</surname>
              <given-names>M</given-names>
            </name>
            <name>
              <surname>Holoubek</surname>
              <given-names>J</given-names>
            </name>
            <name>
              <surname>Lipovy</surname>
              <given-names>B</given-names>
            </name>
            <name>
              <surname>Hamdi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>An algorithm in managing deep inferior epigastric vessel interruption in free flap breast reconstruction</article-title>
          <source>Plast Reconstr Surg Glob Open</source>
          <year>2023</year>
          <volume>11</volume>
          <fpage>e4938</fpage>
          <pub-id pub-id-type="doi">10.1097/GOX.0000000000004938</pub-id>
          <pub-id pub-id-type="pmid">37035127</pub-id>
          <pub-id pub-id-type="pmcid">PMC10079345</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B7">
        <label>7</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Boer</surname>
              <given-names>VB</given-names>
            </name>
            <name>
              <surname>van Wingerden</surname>
              <given-names>JJ</given-names>
            </name>
            <name>
              <surname>Wever</surname>
              <given-names>CF</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Concordance between preoperative computed tomography angiographic mapping and intraoperative perforator selection for deep inferior epigastric artery perforator flap breast reconstructions</article-title>
          <source>Gland Surg</source>
          <year>2017</year>
          <volume>6</volume>
          <fpage>620</fpage>
          <pub-id pub-id-type="doi">10.21037/gs.2017.09.13</pub-id>
          <pub-id pub-id-type="pmid">29302477</pub-id>
          <pub-id pub-id-type="pmcid">PMC5750307</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B8">
        <label>8</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Mavioso</surname>
              <given-names>C</given-names>
            </name>
            <name>
              <surname>Araújo</surname>
              <given-names>RJ</given-names>
            </name>
            <name>
              <surname>Oliveira</surname>
              <given-names>HP</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Automatic detection of perforators for microsurgical reconstruction</article-title>
          <source>Breast</source>
          <year>2020</year>
          <volume>50</volume>
          <fpage>19</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.1016/j.breast.2020.01.001</pub-id>
          <pub-id pub-id-type="pmid">31972533</pub-id>
          <pub-id pub-id-type="pmcid">PMC7375543</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B9">
        <label>9</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kapila</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>Iyer</surname>
              <given-names>H</given-names>
            </name>
            <name>
              <surname>Mohanna</surname>
              <given-names>P</given-names>
            </name>
            <name>
              <surname>Mughal</surname>
              <given-names>M</given-names>
            </name>
            <name>
              <surname>Hamdi</surname>
              <given-names>M</given-names>
            </name>
            <name>
              <surname>Rose</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>The impact of physical activity on patient-reported outcomes following deep inferior epigastric perforator flap breast reconstruction</article-title>
          <source>J Plast Reconstr Aesthet Surg</source>
          <year>2024</year>
          <volume>97</volume>
          <fpage>6</fpage>
          <lpage>12</lpage>
          <pub-id pub-id-type="doi">10.1016/j.bjps.2024.07.050</pub-id>
          <pub-id pub-id-type="pmid">39121549</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B10">
        <label>10</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kapila</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>De Vleminck</surname>
              <given-names>L</given-names>
            </name>
            <name>
              <surname>Hamdi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Three key domains for optimizing preoperative preparedness in plastic surgery with AI</article-title>
          <source>Plast Aesthet Res</source>
          <year>2025</year>
          <volume>12</volume>
          <fpage>11</fpage>
          <pub-id pub-id-type="doi">10.20517/2347-9264.2024.165</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B11">
        <label>11</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kapila</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>Georgiou</surname>
              <given-names>L</given-names>
            </name>
            <name>
              <surname>Hamdi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Decoding the impact of AI on microsurgery: systematic review and classification of six subdomains for future development</article-title>
          <source>Plast Reconstr Surg Glob Open</source>
          <year>2024</year>
          <volume>12</volume>
          <fpage>e6323</fpage>
          <pub-id pub-id-type="doi">10.1097/GOX.0000000000006323</pub-id>
          <pub-id pub-id-type="pmid">39568680</pub-id>
          <pub-id pub-id-type="pmcid">PMC11578208</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B12">
        <label>12</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Araújo</surname>
              <given-names>RJ</given-names>
            </name>
            <name>
              <surname>Garrido</surname>
              <given-names>V</given-names>
            </name>
            <name>
              <surname>Baraças</surname>
              <given-names>CA</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Computer aided detection of deep inferior epigastric perforators in computed tomography angiography scans</article-title>
          <source>Comput Med Imaging Graph</source>
          <year>2019</year>
          <volume>77</volume>
          <fpage>101648</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compmedimag.2019.101648</pub-id>
          <pub-id pub-id-type="pmid">31476532</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B13">
        <label>13</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Tetteh</surname>
              <given-names>G</given-names>
            </name>
            <name>
              <surname>Efremov</surname>
              <given-names>V</given-names>
            </name>
            <name>
              <surname>Forkert</surname>
              <given-names>ND</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>DeepVesselNet: vessel segmentation, centerline prediction, and bifurcation detection in 3-D angiographic volumes</article-title>
          <source>Front Neurosci</source>
          <year>2020</year>
          <volume>14</volume>
          <fpage>592352</fpage>
          <pub-id pub-id-type="doi">10.3389/fnins.2020.592352</pub-id>
          <pub-id pub-id-type="pmid">33363452</pub-id>
          <pub-id pub-id-type="pmcid">PMC7753013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B14">
        <label>14</label>
        <nlm-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Saxena</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <comment>Deep learning for personalized preoperative planning of microsurgical free tissue transfers. In: Proceedings of the AAAI Conference on Artificial Intelligence; 2022 Feb 22-Mar 1; Virtual. Washington, D.C.: AAAI.</comment>
          <pub-id pub-id-type="doi">10.1609/aaai.v36i11.21706</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B15">
        <label>15</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Vandemeulebroucke</surname>
              <given-names>J</given-names>
            </name>
            <name>
              <surname>Bernard</surname>
              <given-names>O</given-names>
            </name>
            <name>
              <surname>Rit</surname>
              <given-names>S</given-names>
            </name>
            <name>
              <surname>Kybic</surname>
              <given-names>J</given-names>
            </name>
            <name>
              <surname>Clarysse</surname>
              <given-names>P</given-names>
            </name>
            <name>
              <surname>Sarrut</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Automated segmentation of a motion mask to preserve sliding motion in deformable registration of thoracic CT</article-title>
          <source>Med Phys</source>
          <year>2012</year>
          <volume>39</volume>
          <fpage>1006</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1118/1.3679009</pub-id>
          <pub-id pub-id-type="pmid">22320810</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B16">
        <label>16</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Setio</surname>
              <given-names>AAA</given-names>
            </name>
            <name>
              <surname>Traverso</surname>
              <given-names>A</given-names>
            </name>
            <name>
              <surname>de Bel</surname>
              <given-names>T</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Validation, comparison, and combination of algorithms for automatic detection of pulmonary nodules in computed tomography images: the LUNA16 challenge</article-title>
          <source>Med Image Anal</source>
          <year>2017</year>
          <volume>42</volume>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2017.06.015</pub-id>
          <pub-id pub-id-type="pmid">28732268</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B17">
        <label>17</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Ceranka</surname>
              <given-names>J</given-names>
            </name>
            <name>
              <surname>Wuts</surname>
              <given-names>J</given-names>
            </name>
            <name>
              <surname>Chiabai</surname>
              <given-names>O</given-names>
            </name>
            <name>
              <surname>Lecouvet</surname>
              <given-names>F</given-names>
            </name>
            <name>
              <surname>Vandemeulebroucke</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Computer-aided diagnosis of skeletal metastases in multi-parametric whole-body MRI</article-title>
          <source>Comput Methods Programs Biomed</source>
          <year>2023</year>
          <volume>242</volume>
          <fpage>107811</fpage>
          <pub-id pub-id-type="doi">10.1016/j.cmpb.2023.107811</pub-id>
          <pub-id pub-id-type="pmid">37742486</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B18">
        <label>18</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Karunanithy</surname>
              <given-names>N</given-names>
            </name>
            <name>
              <surname>Rose</surname>
              <given-names>V</given-names>
            </name>
            <name>
              <surname>Lim</surname>
              <given-names>AK</given-names>
            </name>
            <name>
              <surname>Mitchell</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>CT angiography of inferior epigastric and gluteal perforating arteries before free flap breast reconstruction</article-title>
          <source>Radiographics</source>
          <year>2011</year>
          <volume>31</volume>
          <fpage>1307</fpage>
          <lpage>19</lpage>
          <pub-id pub-id-type="doi">10.1148/rg.315105089</pub-id>
          <pub-id pub-id-type="pmid">21918046</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B19">
        <label>19</label>
        <nlm-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Pieper</surname>
              <given-names>S</given-names>
            </name>
            <name>
              <surname>Halle</surname>
              <given-names>M</given-names>
            </name>
            <name>
              <surname>Kikinis</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <comment>3D Slicer. In: 2004 2nd IEEE International Symposium on Biomedical Imaging: Macro to Nano; 2004 Apr 15-18; Arlington, VA, USA. New York: IEEE; 2004. pp. 632-5.</comment>
          <pub-id pub-id-type="doi">10.1109/ISBI.2004.1398617</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B20">
        <label>20</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Wasserthal</surname>
              <given-names>J</given-names>
            </name>
            <name>
              <surname>Breit</surname>
              <given-names>HC</given-names>
            </name>
            <name>
              <surname>Meyer</surname>
              <given-names>MT</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>TotalSegmentator: robust segmentation of 104 anatomic structures in CT images</article-title>
          <source>Radiol Artif Intell</source>
          <year>2023</year>
          <volume>5</volume>
          <fpage>e230024</fpage>
          <pub-id pub-id-type="doi">10.1148/ryai.230024</pub-id>
          <pub-id pub-id-type="pmid">37795137</pub-id>
          <pub-id pub-id-type="pmcid">PMC10546353</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B21">
        <label>21</label>
        <nlm-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Hatamizadeh</surname>
              <given-names>A</given-names>
            </name>
            <name>
              <surname>Nath</surname>
              <given-names>V</given-names>
            </name>
            <name>
              <surname>Tang</surname>
              <given-names>Y</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name>
              <surname>Roth</surname>
              <given-names>HR</given-names>
            </name>
            <name>
              <surname>Xu</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <comment>Swin UNETR: Swin transformers for semantic segmentation of brain tumors in MRI images. In: Crimi A, Bakas S, editors. International MICCAI Brainlesion Workshop; 2021 Sep 27-27; Virtual. Cham: Springer; 2021. pp. 272-84.</comment>
          <pub-id pub-id-type="doi">10.1007/978-3-031-08999-2_22</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B22">
        <label>22</label>
        <nlm-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Tang</surname>
              <given-names>Y</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>D</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <etal />
          </person-group>
          <comment>Self-supervised pre-training of swin transformers for 3D medical image analysis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR); 2022 Jun 18-24; New Orleans, LA, USA. New York: IEEE; 2022. pp. 20698-708.</comment>
          <pub-id pub-id-type="doi">10.1109/CVPR52688.2022.02007</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B23">
        <label>23</label>
        <nlm-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Yao</surname>
              <given-names>Z</given-names>
            </name>
            <name>
              <surname>Gholami</surname>
              <given-names>A</given-names>
            </name>
            <name>
              <surname>Shen</surname>
              <given-names>S</given-names>
            </name>
            <name>
              <surname>Mustafa</surname>
              <given-names>M</given-names>
            </name>
            <name>
              <surname>Keutzer</surname>
              <given-names>K</given-names>
            </name>
            <name>
              <surname>Mahoney</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <comment>ADAHESSIAN: an adaptive second order optimizer for machine learning. In: Proceedings of the AAAI Conference on Artificial Intelligence; 2021 Feb 2-9; Virtual. Washington, D.C.: AAAI.</comment>
          <pub-id pub-id-type="doi">10.1609/aaai.v35i12.17275</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B24">
        <label>24</label>
        <nlm-citation publication-type="web">
          <person-group person-group-type="author">
            <name>
              <surname>Cardoso</surname>
              <given-names>MJ</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>W</given-names>
            </name>
            <name>
              <surname>Brown</surname>
              <given-names>R</given-names>
            </name>
            <etal />
          </person-group>
          <comment>MONAI: an open-source framework for deep learning in healthcare. <italic>arXiv</italic> 2022;arXiv:2211.02701. Available from <uri xlink:href="https://doi.org/10.48550/arXiv.2211.02701">https://doi.org/10.48550/arXiv.2211.02701</uri> [accessed 12 May 2026].</comment>
        </nlm-citation>
      </ref>
      <ref id="B25">
        <label>25</label>
        <nlm-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Maier-Hein</surname>
              <given-names>L</given-names>
            </name>
            <name>
              <surname>Reinke</surname>
              <given-names>A</given-names>
            </name>
            <name>
              <surname>Godau</surname>
              <given-names>P</given-names>
            </name>
            <etal />
          </person-group>
          <article-title>Metrics reloaded: recommendations for image analysis validation</article-title>
          <source>Nat Methods</source>
          <year>2024</year>
          <volume>21</volume>
          <fpage>195</fpage>
          <lpage>212</lpage>
          <pub-id pub-id-type="doi">10.1038/s41592-023-02151-z</pub-id>
          <pub-id pub-id-type="pmid">38347141</pub-id>
          <pub-id pub-id-type="pmcid">PMC11182665</pub-id>
        </nlm-citation>
      </ref>
      <ref id="B26">
        <label>26</label>
        <nlm-citation publication-type="web">
          <person-group person-group-type="author">
            <name>
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name>
              <surname>Gu</surname>
              <given-names>H</given-names>
            </name>
            <name>
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <etal />
          </person-group>
          <comment>Automated muscle and fat segmentation in computed tomography for comprehensive body composition analysis. <italic>arXiv</italic> 2025;arXiv:2502.09779. Available from <uri xlink:href="https://doi.org/10.48550/arXiv.2502.09779">https://doi.org/10.48550/arXiv.2502.09779</uri> [accessed 12 May 2026].</comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>