<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JC</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Cancer</journal-id>
      <journal-title>JMIR Cancer</journal-title>
      <issn pub-type="epub">2369-1999</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v11i1e69672</article-id>
      <article-id pub-id-type="pmid">40163848</article-id>
      <article-id pub-id-type="doi">10.2196/69672</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>AI-Based Identification Method for Cervical Transformation Zone Within Digital Colposcopy: Development and Multicenter Validation Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Cahill</surname>
            <given-names>Naomi</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Li</surname>
            <given-names>Jing</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chu</surname>
            <given-names>Yuqing</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Wu</surname>
            <given-names>Tong</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0005-4471-2186</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Wang</surname>
            <given-names>Yuting</given-names>
          </name>
          <degrees>BM</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0004-0582-0347</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Cui</surname>
            <given-names>Xiaoli</given-names>
          </name>
          <degrees>MM</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0007-4109-775X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Xue</surname>
            <given-names>Peng</given-names>
          </name>
          <degrees>MPH, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3002-8146</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Qiao</surname>
            <given-names>Youlin</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>School of Population Medicine and Public Health</institution>
            <institution>Chinese Academy of Medical Sciences and Peking Union Medical College</institution>
            <addr-line>31 Yard, Beijige Santiao</addr-line>
            <addr-line>Beijing, 100730</addr-line>
            <country>China</country>
            <phone>86 10 8778 8489</phone>
            <email>qiaoy@cicams.ac.cn</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6380-0871</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>School of Population Medicine and Public Health</institution>
        <institution>Chinese Academy of Medical Sciences and Peking Union Medical College</institution>
        <addr-line>Beijing</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Liaoning Cancer Hospital and Institute</institution>
        <institution>Department of Gynecologic Oncology</institution>
        <institution>Cancer Hospital of China Medical University</institution>
        <addr-line>Shenyang</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Youlin Qiao <email>qiaoy@cicams.ac.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>31</day>
        <month>3</month>
        <year>2025</year>
      </pub-date>
      <volume>11</volume>
      <elocation-id>e69672</elocation-id>
      <history>
        <date date-type="received">
          <day>6</day>
          <month>12</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>7</day>
          <month>1</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>16</day>
          <month>1</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>19</day>
          <month>2</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Tong Wu, Yuting Wang, Xiaoli Cui, Peng Xue, Youlin Qiao. Originally published in JMIR Cancer (https://cancer.jmir.org), 31.03.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Cancer, is properly cited. The complete bibliographic information, a link to the original publication on https://cancer.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://cancer.jmir.org/2025/1/e69672" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>In low- and middle-income countries, cervical cancer remains a leading cause of death and morbidity for women. Early detection and treatment of precancerous lesions are critical in cervical cancer prevention, and colposcopy is a primary diagnostic tool for identifying cervical lesions and guiding biopsies. The transformation zone (TZ) is where a stratified squamous epithelium develops from the metaplasia of simple columnar epithelium and is the most common site of precancerous lesions. However, inexperienced colposcopists may find it challenging to accurately identify the type and location of the TZ during a colposcopy examination.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to present an artificial intelligence (AI) method for identifying the TZ to enhance colposcopy examination and evaluate its potential clinical application.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>The study retrospectively collected data from 3616 women who underwent colposcopy at 6 tertiary hospitals in China between 2019 and 2021. A dataset from 4 hospitals was collected for model construction. An independent dataset was collected from the other 2 geographic hospitals to validate model performance. There was no overlap between the training and validation datasets. Anonymized digital records, including each colposcopy image, baseline clinical characteristics, colposcopic findings, and pathological outcomes, were collected. The classification model was proposed as a lightweight neural network with multiscale feature enhancement capabilities and designed to classify the 3 types of TZ. The pretrained FastSAM model was first implemented to identify the location of the new squamocolumnar junction for segmenting the TZ. Overall accuracy, average precision, and recall were evaluated for the classification and segmentation models. The classification performance on the external validation was assessed by sensitivity and specificity.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The optimal TZ classification model performed with 83.97% classification accuracy on the test set, which achieved average precision of 91.84%, 89.06%, and 95.62% for types 1, 2, and 3, respectively. The recall and mean average precision of the TZ segmentation model were 0.78 and 0.75, respectively. The proposed model demonstrated outstanding performance in predicting 3 types of the TZ, achieving the sensitivity with 95% CIs for TZ1, TZ2, and TZ3 of 0.78 (0.74-0.81), 0.81 (0.78-0.82), and 0.8 (0.74-0.87), respectively, with specificity with 95% CIs of 0.94 (0.92-0.96), 0.83 (0.81-0.86), and 0.91 (0.89-0.92), based on a comprehensive external dataset of 1335 cases from 2 of the 6 hospitals.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Our proposed AI-based identification system classified the type of cervical TZs and delineated their location on multicenter, colposcopic, high-resolution images. The findings of this study have shown its potential to predict TZ types and specific regions accurately. It was developed as a valuable assistant to encourage precise colposcopic examination in clinical practice.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>AI</kwd>
        <kwd>cervical cancer screening</kwd>
        <kwd>transformation zone</kwd>
        <kwd>diagnosis and early treatment</kwd>
        <kwd>lightweight neural network</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Cervical cancer remains the fourth most prevalent cancer among women worldwide [<xref ref-type="bibr" rid="ref1">1</xref>], and it continues to be a leading cause of morbidity and mortality threatening women’s health. Since cervical precancerous lesions often progress to invasive cancer over an extended period, early detection is critical for cervical cancer prevention. Colposcopy serves as a crucial component of cervical cancer screening, providing a preliminary diagnosis for patients based on screening results, which then guides subsequent biopsy and treatment. Although it is a fundamental technique that health care providers can easily teach and implement, the strong subjective nature of colposcopy diagnosis makes it difficult for colposcopists with different qualifications to perform standardized diagnoses and make effective clinical decisions [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. Artificial intelligence (AI) diagnostic technology could resolve the disparities in expertise among clinicians and enhance screening efficiency [<xref ref-type="bibr" rid="ref4">4</xref>].</p>
      <p>The transformation zone (TZ) is where a stratified squamous epithelium develops from the metaplasia of simple columnar epithelium and is the most common site of precancerous lesions. More than 90% of cervical cancers develop within the TZ [<xref ref-type="bibr" rid="ref5">5</xref>], making it a critical region for cervical intraepithelial neoplasia (CIN) diagnosis and early treatment. According to the visibility of the squamocolumnar junction (SCJ), the TZ can be classified into three types: TZ1 (SCJ fully visible), TZ2 (SCJ fully visible under endocervical speculum), and TZ3 (SCJ partially visible or not visible) [<xref ref-type="bibr" rid="ref6">6</xref>]. Accurately identifying the TZ is crucial for diagnosing and treating cervical precancerous lesions. As the TZ moves into the cervical canal with increasing age, endocervical curettage (ECC) is necessary for biopsy-guided pathology [<xref ref-type="bibr" rid="ref7">7</xref>]. If TZ types are not classified, the importance of ECC for the canal may be neglected, leading to missed diagnosis of lesions during colposcopic examination. In addition, excision of the entire TZ is a standard treatment for cervical precancerous lesions. For excisional treatment, TZ types determine the length and depth of the cervix to be excised. In destructive treatments, a prerequisite is that the TZ must be either type 1 or type 2. Therefore, the type and location of the TZ are the determinants of treatment choices, and accurately assessing the TZ is essential for guiding more effective biopsies and precise treatment.</p>
      <p>However, in underserved populations, the skills of colposcopists are generally suboptimal, with colposcopic finding accuracy being significantly lower than desired [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. AI-assisted technology could effectively enhance the competencies of colposcopists in these underserved areas. Current evaluation studies [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>] have demonstrated that junior or less-experienced colposcopists can detect abnormal cervical lesions more effectively with AI assistance, which indicates its potential to help reduce missed diagnoses. However, the functions of AI should not be limited to lesion detection. All colposcopic features are indicators for assessing colposcopist performance. Among them, the accurate identification of TZ types is essential for implementing effective colposcopy diagnostics and treatment procedures, potentially reducing the number of missed diagnoses and unnecessary biopsy procedures. However, current AI-assisted colposcopy systems or developed AI diagnosis models do not include TZ or SCJ detection during model construction. Therefore, it is essential to integrate important clinical features into AI model training to improve colposcopy diagnosis efficiency.</p>
      <p>It remains challenging for AI colposcopy diagnostic systems to distinguish among benign, CIN1, CIN2, and CIN3+ cases during colposcopy examination [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>], although they achieved over 80% accuracy in detecting high-grade squamous intraepithelial lesions. This difficulty is attributable to the lack of specificity in the CIN-related acetowhite staining characteristics. Typically, normal cervical features, such as immature squamous metaplasia, congenital TZ, inflammation, and epithelium regeneration, may exhibit mild acetowhite reactions similar to those associated with CIN. This similarity implies that relying solely on acetowhite area features can easily lead to misclassification, either for AI or for less experienced colposcopists. In AI model training, standardized annotated images of different acetowhite morphologies or fine-grained lesion descriptions may help with a more precise assessment of acetowhite characteristics [<xref ref-type="bibr" rid="ref15">15</xref>]. However, current AI-assisted colposcopy research is constrained by the lack of standardized annotated colposcopy images. From a clinical perspective, the colposcopy guidelines issued by the International Federation for Cervical Pathology and Colposcopy (IFCPC) emphasize that CIN-related acetowhite changes are most commonly found in the TZ, and near the new SCJ, with clear demarcation from the surrounding epithelium [<xref ref-type="bibr" rid="ref6">6</xref>]. Therefore, the TZ region can be used as an indicator to identify lesion areas and can be developed as a learned feature for diagnostic model development, thereby resolving the problem of the lack of annotated colposcopy images. As a result, the multiclassification accuracy of AI-guided colposcopic diagnostic systems may be significantly improved. Therefore, in AI model development, accurately identifying TZ types and the SCJ is a crucial step toward improving AI diagnostic accuracy and guiding biopsies.</p>
      <p>In this study, an AI method is developed and validated for the classification and delineation of the TZ. This method not only has the potential to guide clinical colposcopic examinations in resource-limited health care settings but also encourages the advancement of AI-guided digital colposcopic systems. By incorporating all colposcopic findings, such as TZ type and features of both minor and major lesions, AI-guided digital colposcopy could become a mature assistant for universal clinical colposcopy examinations.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Patients</title>
        <p>This retrospective study included 3616 women who underwent colposcopy examinations across 6 multicenter hospitals in China between January 2019 and October 2021. These hospitals were the Gansu Maternity and Child Healthcare Hospital, Second Hospital of Shanxi Medical University, Shenzhen Maternity and Child Healthcare Hospital, Jiangxi Maternity and Child Healthcare Hospital, The Affiliated Hospital of Qingdao University, and Chengdu Women’s and Children’s Central Hospital. The digital clinical records, including cytology, human papillomavirus infection status, colposcopy findings, and pathological results, were collected. All colposcopy images were captured using digital high-definition video colposcopes (Zonsun Healthcare Co Ltd, Edan Instruments). Colposcopy findings, including adequacy, SCJ visibility, TZ determination, and provisional diagnosis, were qualified by colposcopy experts from tertiary hospitals. The “ground truth” for TZ classification and SCJ visibility in this study was determined by an expert panel following IFCPC guidelines.</p>
        <p>The inclusion criteria were as follows: (1) women with complete colposcopy findings and pathological diagnosis, (2) ages between 24 and 65 years, and (3) each record containing at least 5 satisfactory colposcopic images before and after acetic acid staining. The exclusion criteria were (1) all saline solution images (preacetic), since the TZ typically appears after acetic acid staining; (2) poor-quality images, such as overexposed images or those where the cervix was obscured or there was bleeding after the biopsy; (3) inadequate colposcopic examination; and (4) records with missing TZ types or SCJ visibility. For external validation, an independent dataset was derived from The Affiliated Hospital of Qingdao University and Chengdu Women’s and Children’s Central Hospital. The training dataset was divided into training and test sets in an 8:2 ratio, with 10% of the training set used for validation during model tuning.</p>
      </sec>
      <sec>
        <title>Transformation Zone</title>
        <p>The colposcopy procedures adhered to the standard guidelines for <italic>Colposcopy and Treatment of Cervical Precancer</italic> [<xref ref-type="bibr" rid="ref6">6</xref>]. Time-series images were captured for each case, including 1 original cervix image (saline solution) and at least 4 acetic acid–stained cervix images. The classification of TZ followed guideline criteria. TZ was classified as type 1 when it was entirely located on the ectocervix. In type 2, TZ was partially located within the endocervical canal, but its upper limit could be visualized using auxiliary instruments. Type 3 TZ lies partly or entirely inside the endocervical canal, with its upper limit being partially or completely invisible, even with the aid of auxiliary instruments.</p>
      </sec>
      <sec>
        <title>Image Preprocessing</title>
        <p>The colposcopy images were captured in high resolution. However, they contained irrelevant objects, such as the endocervical speculum, cotton swabs, or large regions of the vaginal wall, which could interfere with the classification model’s ability to extract critical features of the cervix. To address this issue, we used the YOLOv5 network to detect the region of interest. Its single forward pass design enables real-time object detection with high efficiency and precision [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. The integrated cervical region was automatically segmented and examined by a specialist (<xref rid="figure1" ref-type="fig">Figure 1</xref>). It divides an image into an S×S grid cell, predicting the bounding box locations and their associated categories. All segmented colposcopy images were resized to 224×224 pixels to align with the input specifications of the model.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Cervix region of interest detection with bounding box examples in representative images. TZ: transformation zone.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Development of the TZ Classification Model</title>
        <p>Initially, colposcopy images were input into a detection model to determine the cervical region, which was then preprocessed to enhance feature extraction. These data were then applied to a classification model to determine the types of TZ present (<xref rid="figure2" ref-type="fig">Figure 2</xref>A). In the second part of the method, the original images were annotated with new SCJ prompts to guide a general segmentation model in inferring the potential location of the TZ (<xref rid="figure2" ref-type="fig">Figure 2</xref>B).</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>The overall inference process of the TZ identification system consists of two stages: (A) detection and extraction of the entire cervical region from original images, followed by feature extraction using the variant MobileNetV3 and inference TZ1, TZ2, and TZ3 types; and (B) using the original images and images with polygon of the new SCJ as prompts to FastSAM, which then outputs the mask prediction of the out-of-TZ area to infer the new SCJ guidance line. SCJ: squamocolumnar junction; TZ: transformation zone.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>We proposed a variant of the MobileNetV3 architecture for classifying TZ, a lightweight convolutional neural network (CNN) specifically designed for efficient operation on portable devices, which was particularly suitable for deployment in resource-limited settings. The overall model structure is presented in <xref rid="figure3" ref-type="fig">Figure 3</xref>. Our model retained the depthwise separable convolutions and the HardSwish (H-swish) activation function of MobileNetV3, which reduced computational demand and parameters without compromising accuracy. Multiscale convolution modules have been incorporated into the model to effectively extract features at multiple focal points (<xref rid="figure3" ref-type="fig">Figure 3</xref>A), from subtle acetowhite changes in the columnar epithelium to varying TZ-type scopes in the cervix. In addition, we introduced a spatial pyramid pooling module to address features at multiple scales while preserving spatial information in the input images to ensure the richness of feature representation (<xref rid="figure3" ref-type="fig">Figure 3</xref>B). The Squeeze-and-Excitation module is a lightweight attention mechanism designed to automatically prioritize the most diagnostically relevant visual patterns within the MobileNetV3 architecture (<xref rid="figure3" ref-type="fig">Figure 3</xref>C). In our model, the Squeeze-and-Excitation module was embedded to adjust global feature weights (<xref rid="figure3" ref-type="fig">Figure 3</xref>D), enhancing the overall effectiveness of feature extraction. It increased the sensitivity to important features, thereby improving classification performance [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>The overall architecture of the proposed model: (A) details of the multiscale convolution (MSC) module; (B) details of the Spatial Pyramid Pooling (SPP) module; (C) structure of the Squeeze-and-Excitation (SE) block module; (D) workflow of the model that included detailed layers of the network; and (E) structure of the inverted residuals module.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Development of the TZ Segmentation Model</title>
        <p>The FastSAM model was used to segment the TZ from the entire cervix in colposcopy images (<xref rid="figure2" ref-type="fig">Figure 2</xref>B). FastSAM was fine-tuned for the specific dataset using a YOLO-based framework [<xref ref-type="bibr" rid="ref19">19</xref>]. Colposcopic images were resized to 640×640 pixels, and the new SCJ was annotated using LabelMe (MIT CSAIL) [<xref ref-type="bibr" rid="ref20">20</xref>]. By selectively freezing some of the backbone layers of the model, the deeper layers were fine-tuned to improve adaptation to the specific task, without compromising the extraction of general features. The model’s performance was further enhanced by using coarse masks as spatial guides, which allowed the model to focus its attention on the most relevant image regions. The model was trained to delineate the new SCJ, thereby identifying possible TZ areas by this critical boundary. TZ is a dynamic region defined by the area between the original SCJ and the new SCJ. Typically, the original SCJ is considered a virtual line, and identifying the new SCJ is crucial for determining the location of the TZ (<xref rid="figure4" ref-type="fig">Figure 4</xref>).</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Illustration of the squamocolumnar junction (SCJ) and the transformation zone.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Software and Hardware</title>
        <p>The study was performed on a Windows 11 operating system with an NVIDIA GeForce RTX 3080 8GB graphics card. The models were implemented using Python (version 3.6.13; Python Software Foundation) and the PyTorch (Meta AI) Library (version 1.7.1).</p>
      </sec>
      <sec>
        <title>Evaluation Metrics</title>
        <p>Experiment performance of the cervical TZ classification model was evaluated using accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, and average precision, as defined in Equations 1-6.</p>
        <graphic xlink:href="cancer_v11i1e69672_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <graphic xlink:href="cancer_v11i1e69672_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <graphic xlink:href="cancer_v11i1e69672_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <graphic xlink:href="cancer_v11i1e69672_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <graphic xlink:href="cancer_v11i1e69672_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <graphic xlink:href="cancer_v11i1e69672_fig14.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <p>Where TP, FP, and FN denote the true positive, false positive, and false negative predictions, respectively. <italic>r</italic>(<italic>k</italic>) denotes the recall at threshold <italic>k</italic>. <italic>p</italic>(<italic>k</italic>) denotes the precision at threshold <italic>k</italic>, and <italic>n</italic> denotes the number of thresholds. <italic>AP<sub>i</sub></italic> is the average precision for class <italic>i</italic>, and mAP denotes the mean average precision, where <italic>n</italic> is the number of classes.</p>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>The performance of the classification model was evaluated by comparing it with selected state-of-the-art (SOTA) deep-learning models using metrics such as accuracy, precision, recall, and the area under the precision recall curve, represented by average precision. To evaluate the classification of TZ types, sensitivity, specificity, positive predictive value, and negative predictive value (NPV) were calculated, along with their corresponding 95% CIs. The evaluation metric was defined as the agreement with the expert-provided TZ classification. The demographic characteristics of the study participants were summarized using means and SDs for continuous variables and percentages for categorical variables. A <italic>P</italic> value of less than .05 (two-sided) was considered statistically significant. Statistical analyses were performed using SPSS (version 27.0; IBM Corp) and Python (version 3.7).</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study was approved by the Institutional Review Board of the Chinese Academy of Medical Sciences and Peking Union Medical College (CAMS and PUMC-IEC-2022-022). Informed consent was not required due to the retrospective nature of the dataset, and all personal information and images were completely anonymized. We commit that all research data will be used for academic research purposes.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>General Information of the Study Dataset</title>
        <p>For the classification modeling, 8335 colposcopy images from 4 hospitals were selected for training and evaluation. These images consisted of 2788 images of TZ1, 2663 images of TZ2, and 2884 images of TZ3. In the external validation study, 1335 cases were selected from 2 hospitals for model inference to predict the TZ types (<xref rid="figure5" ref-type="fig">Figure 5</xref>). The demographic characteristics of each participant, including age, colposcopic findings, and the distribution of each TZ type, are provided in <xref ref-type="table" rid="table1">Table 1</xref>. TZ types were found to be significantly associated with the participants’ age group distribution (<italic>P</italic>&#60;.001). The mean age of all participants at the time of colposcopy was 37.87 (SD 9.99) years, with the mean age of the TZ3 group being 45.04 (SD 11.69) years.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Flowchart of the study case collection. SCJ: squamocolumnar junction; TZ: transformation zone.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Demographic characteristics and distribution of cervical TZ<sup>a</sup> types. Differences were analyzed using the chi-square test.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="0"/>
            <col width="0"/>
            <col width="0"/>
            <col width="250"/>
            <col width="200"/>
            <col width="190"/>
            <col width="220"/>
            <col width="0"/>
            <col width="110"/>
            <thead>
              <tr valign="top">
                <td colspan="5">Characteristic</td>
                <td>TZ1 (n=594)</td>
                <td>TZ2 (n=604)</td>
                <td>TZ3 (n=137)</td>
                <td colspan="2"><italic>P</italic> value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="5">
                  <bold>Age group (year), mean (SD)</bold>
                </td>
                <td>34.44 (8.45)</td>
                <td>39.62 (9.64)</td>
                <td>45.04 (11.69)</td>
                <td colspan="2">&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="4">24-29</td>
                <td>140 (23.6)</td>
                <td>62 (10.3)</td>
                <td>10 (7.3)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="4">30-49</td>
                <td>386 (65.0)</td>
                <td>432 (71.5)</td>
                <td>79 (57.7)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="4">50-65</td>
                <td>68 (11.4)</td>
                <td>110 (18.2)</td>
                <td>48 (35.0)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Parity, n (%)</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="3">0</td>
                <td>159 (26.8)</td>
                <td>95 (15.7)</td>
                <td>18 (13.1)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="3">1-3</td>
                <td>428 (72.1)</td>
                <td>488 (80.8)</td>
                <td>116 (84.7)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="3">&#62;3</td>
                <td>7 (1.2)</td>
                <td>21 (3.5)</td>
                <td>3 (2.2)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Menstrual status, n (%)</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <break/>
                </td>
                <td colspan="2">Menopause</td>
                <td>25 (4.2)</td>
                <td>71 (11.8)</td>
                <td>43 (31.4)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <break/>
                </td>
                <td colspan="2">No menopause</td>
                <td>569 (95.7)</td>
                <td>533 (88.2)</td>
                <td>94 (68.6)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>SCJ<sup>b</sup> visibility, n (%)</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <break/>
                </td>
                <td colspan="2">Completely visible</td>
                <td>594 (100.0)</td>
                <td>73 (12.1)</td>
                <td>0 (0)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <break/>
                </td>
                <td colspan="2">Partially visible</td>
                <td>0 (0)</td>
                <td>531 (87.9)</td>
                <td>28 (20.4)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <break/>
                </td>
                <td colspan="2">Invisible</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
                <td>109 (79.6)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="9">
                  <bold>Pathologic diagnosis, n (%)</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <break/>
                </td>
                <td>Normal or benign</td>
                <td>220 (37)</td>
                <td>188 (31.1)</td>
                <td>26 (19)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <break/>
                </td>
                <td>CIN<sup>c</sup>1</td>
                <td>220 (37)</td>
                <td>213 (35.3)</td>
                <td>51 (37.2)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <break/>
                </td>
                <td>CIN2</td>
                <td>124 (20.8)</td>
                <td>131 (21.7)</td>
                <td>23 (16.8)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <break/>
                </td>
                <td>CIN3</td>
                <td>24 (4)</td>
                <td>63 (10.4)</td>
                <td>13 (9.5)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <break/>
                </td>
                <td>Cancer</td>
                <td>6 (1)</td>
                <td>9 (1.5)</td>
                <td>24 (17.5)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>TZ: transformation zone.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>SCJ: squamocolumnar junction.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>CIN: cervical intraepithelial neoplasia.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>TZ Classification Results</title>
        <p>Cervix detection with a bounding box was used for feature engineering. Proper cervix extraction improved classification accuracy. A total of 8335 cervix images were investigated and resized to 224×224 pixels before being input into the classification model. Out of these, 1663 images were used to evaluate the model’s performance.</p>
        <p>Around 80% of the images were randomly selected as the training set, with the optimal weight parameter model selected during training being used to classify the images in the test set. The validation accuracy of the model gradually improved as the number of epochs increased, as shown in the validation plot (<xref rid="figure6" ref-type="fig">Figure 6</xref>). After approximately 100 epochs, both validation accuracy and training loss stabilized, reaching a peak validation accuracy of 83% at the 200th epoch. Although the validation accuracy fluctuated significantly during the first 25 epochs, it showed a general upward trend, while training loss rapidly decreased, indicating active learning and adjustment of the model. The model reached an optimal balance point and performed well after the 75th epoch when validation accuracy stabilized and training loss continued to decrease but with minimal changes. Overall, the model demonstrated rapid convergence in the early stages of training, with steady performance improvements in the later stages.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Validation accuracy and training loss curve for the proposed classification model.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Our proposed model accurately classified cervical TZ types, as shown in <xref ref-type="table" rid="table2">Table 2</xref>. The highest classification accuracy of 83.97% and precision of 83.93% were achieved on the test set. For the 3 TZ types, the sensitivity was 84.74% for TZ1, 78.95% for TZ2, and 87.87% for TZ3, while the specificity was 89.99% for TZ1, 91.98% for TZ2, and 94.03% for TZ3. The detailed values for sensitivity, specificity, positive predictive value, and NPV are presented in <xref ref-type="table" rid="table3">Table 3</xref>. According to the classification performance of TZ types, the sensitivity and NPV for TZ2 were significantly lower.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Comparative performance results of the proposed classification model with other state-of-the-art models.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="270"/>
            <col width="170"/>
            <col width="170"/>
            <col width="150"/>
            <col width="120"/>
            <col width="120"/>
            <thead>
              <tr valign="bottom">
                <td>Model</td>
                <td>Accuracy (%)</td>
                <td>Precision (%)</td>
                <td>Recall (%)</td>
                <td><italic>F</italic><sub>1</sub>-score (%)</td>
                <td>mAP<sup>a</sup> (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>ResNet50</td>
                <td>59.22</td>
                <td>57.84</td>
                <td>56.89</td>
                <td>54.66</td>
                <td>63.13</td>
              </tr>
              <tr valign="top">
                <td>VGG16</td>
                <td>70.33</td>
                <td>69.76</td>
                <td>69.21</td>
                <td>69.19</td>
                <td>74.64</td>
              </tr>
              <tr valign="top">
                <td>ViT</td>
                <td>59.46</td>
                <td>57.64</td>
                <td>57.39</td>
                <td>56.49</td>
                <td>63.76</td>
              </tr>
              <tr valign="top">
                <td>EfficientNet</td>
                <td>62.06</td>
                <td>60.97</td>
                <td>61.18</td>
                <td>60.66</td>
                <td>63.63</td>
              </tr>
              <tr valign="top">
                <td>ShuffleNet</td>
                <td>74.47</td>
                <td>73.9</td>
                <td>73.67</td>
                <td>73.66</td>
                <td>80.98</td>
              </tr>
              <tr valign="top">
                <td>MobileNetV3</td>
                <td>75.06</td>
                <td>74.49</td>
                <td>74.64</td>
                <td>74.55</td>
                <td>82.72</td>
              </tr>
              <tr valign="top">
                <td>Proposed model</td>
                <td>83.97</td>
                <td>83.93</td>
                <td>83.85</td>
                <td>83.86</td>
                <td>92.17</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>mAP: mean average precision.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Classification performance of the proposed classification model on the test set.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="140"/>
            <col width="180"/>
            <col width="180"/>
            <col width="140"/>
            <col width="140"/>
            <col width="220"/>
            <thead>
              <tr valign="bottom">
                <td>TZ<sup>a</sup> types</td>
                <td>Sensitivity (%)</td>
                <td>Specificity (%)</td>
                <td>PPV<sup>b</sup> (%)</td>
                <td>NPV<sup>c</sup> (%)</td>
                <td>Average precision (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>TZ1</td>
                <td>84.74</td>
                <td>89.99</td>
                <td>80.96</td>
                <td>92.15</td>
                <td>91.84</td>
              </tr>
              <tr valign="top">
                <td>TZ2</td>
                <td>78.95</td>
                <td>91.98</td>
                <td>82.19</td>
                <td>90.30</td>
                <td>89.06</td>
              </tr>
              <tr valign="top">
                <td>TZ3</td>
                <td>87.87</td>
                <td>94.03</td>
                <td>88.64</td>
                <td>93.60</td>
                <td>95.62</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>TZ: transformation zone.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>PPV: positive predictive value.</p>
            </fn>
            <fn id="table3fn3">
              <p><sup>c</sup>NPV: negative predictive value.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>The SOTA deep-learning networks for interpreting colposcopy images were selected to train on our dataset for comparison with our model. <xref ref-type="table" rid="table2">Table 2</xref> presents the experimental results of the test set based on various evaluation metrics. The traditional ResNet50 and Vision Transformer models performed poorly in terms of accuracy and precision, achieving only 59.22% and 57.84%, and 59.46% and 57.64%, respectively. VGG16, with its relatively low-density network structure, performed better than ResNet50 in feature extraction. In addition, we selected 2 lightweight networks for comparison, EfficientNet and ShuffleNet, with higher accuracy of 62.06% and 74.47%. The proposed model achieved the highest accuracy of 83.97% among SOTA models.</p>
      </sec>
      <sec>
        <title>TZ Segmentation Result</title>
        <p>To enhance AI’s clinical guidance, we used a pretrained FastSAM model to segment the TZ region by visualizing the new SCJ. A portion of the training data was used to annotate the new SCJ (<xref rid="figure7" ref-type="fig">Figure 7</xref>A), excluding TZ3, as the new SCJ is located in the endocervix. The segmentation model predicted the negative foreground region indicating the area outside the target TZ region (<xref rid="figure7" ref-type="fig">Figure 7</xref>B). The region boundary of the mask prediction was the predicted new SCJ (<xref rid="figure7" ref-type="fig">Figure 7</xref>C). In the test set, the overall recall and mAP50 (mean average precision at 50% Intersection over Union) of the predicted mask were 0.78 and 0.75, respectively.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>The segmentation model inference process. (A) Ground truth of the polygonal new SCJ outline. (B) Predictive segmentation of the out-of-TZ area. (C) Representative prediction of the new SCJ guidance line. SCJ: squamocolumnar junction; TZ: transformation zone.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Validation Results</title>
        <p>An independent dataset of 1335 cases was used to evaluate the generalization of the TZ classification model, distributed as 594 cases of TZ1, 604 cases of TZ2, and 137 cases of TZ3. The overall classification accuracy was 79.33%. The model’s predicted sensitivity for TZ1, TZ2, and TZ3 was 77.3% (95% CI 73.9%-80.6%), 81.1% (95% CI 78.0%-82.3%), and 80.3% (95% CI 73.7%-86.9%), respectively, while the specificity was 94.2% (95% CI 92.4%-95.8%), 83.3% (95% CI 80.6%-85.9%), and 90.7% (95% CI 89.1%-92.3%). The classification performance is presented in <xref rid="figure8" ref-type="fig">Figure 8</xref> and <xref ref-type="table" rid="table4">Table 4</xref>.</p>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Confusion matrix of the proposed classification model in validation. TZ: transformation zone.</p>
          </caption>
          <graphic xlink:href="cancer_v11i1e69672_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Classification performance on the validation dataset.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="140"/>
            <col width="180"/>
            <col width="190"/>
            <col width="190"/>
            <col width="190"/>
            <col width="110"/>
            <thead>
              <tr valign="bottom">
                <td>TZ<sup>a</sup> types</td>
                <td>Sensitivity (%; 95% CI)</td>
                <td>Specificity (%; 95% CI)</td>
                <td>PPV<sup>b</sup> (%; 95% CI)</td>
                <td>NPV<sup>c</sup> (%; 95% CI)</td>
                <td><italic>F</italic><sub>1</sub>-score (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>TZ1 (n=594)</td>
                <td>77.3 (73.9-80.6)</td>
                <td>94.2 (92.4-95.8)</td>
                <td>91.4 (88.9-93.8)</td>
                <td>83.8 (81.3-86.3)</td>
                <td>83.76</td>
              </tr>
              <tr valign="top">
                <td>TZ2 (n=604)</td>
                <td>81.1 (78.0-82.3)</td>
                <td>83.3 (80.6-85.9)</td>
                <td>80.1 (76.8-83.2)</td>
                <td>84.2 (81.5-86.9)</td>
                <td>80.59</td>
              </tr>
              <tr valign="top">
                <td>TZ3 (n=137)</td>
                <td>80.3 (73.7-86.9)</td>
                <td>90.7 (89.1-92.3)</td>
                <td>49.8 (43.0-56.7)</td>
                <td>97.6 (96.7-98.5)</td>
                <td>61.45</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>TZ: transformation zone.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>PPV: positive predictive value.</p>
            </fn>
            <fn id="table4fn3">
              <p><sup>c</sup>NPV: negative predictive value.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>In this study, we proposed a classification and identification model that facilitates clinical colposcopy examinations of cervical TZ. Our AI model achieved an accuracy of 79.33% in the task of classifying the three types of TZ in the external dataset. The sensitivity of TZ3 was 80.3%, and the specificity was 90.7%, which were satisfactory. TZ3 could be accurately differentiated, which further decreased the chance of missed diagnoses of high-grade lesions [<xref ref-type="bibr" rid="ref21">21</xref>], and it was valuable for guiding ECC and recommending appropriate treatments. The AI model had increased sensitivity on TZ2 and lower sensitivity on TZ1 compared to the performance on the test set, which might be attributed to the similarity between TZ1 and TZ2. According to the standard terminology of the IFCPC, TZ1 and TZ2 were both visible either with or without the assistance of an endocervical speculum [<xref ref-type="bibr" rid="ref22">22</xref>]. TZ2 was nearly entirely visible in some cases, while the SCJ was only visible at the endocervical canal’s border. Therefore, it was difficult to differentiate it from TZ1, which frequently required expert evaluation in strict accordance with established guidelines. In clinical practice, the treatment management for TZ1 and TZ2 is generally similar [<xref ref-type="bibr" rid="ref6">6</xref>]. Our AI model demonstrated noninferior performance in classifying them and could be used to assist less experienced or inexperienced colposcopists in the colposcopic examination process. The overall model accuracy from the validation study was similar to the performance during model training, indicating that it is capable of generalizing fairly well.</p>
      </sec>
      <sec>
        <title>Comparison With Other Studies</title>
        <p>In terms of AI techniques, we proposed a method based on the variant MobileNetV3 architecture for cervical TZ classification and FastSAM for segmenting TZ in colposcopy. Currently, only a few deep learning–based models have been developed to classify cervical TZ from colposcopy images. Dash et al [<xref ref-type="bibr" rid="ref23">23</xref>] developed a TZ segmentation and classification model based on colposcopy images from the IARC image bank. Similarly, Cao et al [<xref ref-type="bibr" rid="ref24">24</xref>] developed a high-performance, deep learning model based on retrospective image data collected from one hospital [<xref ref-type="bibr" rid="ref24">24</xref>]. Comparatively, our method was based on a multicenter study with a more diverse dataset of colposcopy images. While this diversity added to the challenge of feature extraction, the classification model showed better accuracy. Second, the proposed segmentation model could precisely annotate the new SCJ and indicate the approximate location of TZ, which could greatly assist junior colposcopists in selecting biopsy sites. Effective biopsy prioritization focused on the most severe lesions, particularly those within the TZ. Compared with the previous study [<xref ref-type="bibr" rid="ref23">23</xref>], our method delineated a new SCJ rather than the original SCJ, which can provide more effective assistance and insights for colposcopic clinical examination and treatment. Furthermore, we conducted external validation for the TZ classification model for the first time, and our model demonstrated strong stability and generalization performance.</p>
      </sec>
      <sec>
        <title>Clinical Implications</title>
        <p>In resource-limited settings, colposcopists demonstrated significantly lower clinical skills and performance than those observed in our study. The results of a study evaluating the colposcopic abilities of colposcopists in underserved Chinese communities indicated that colposcopists had a mere 22% accuracy specifically for TZ3 [<xref ref-type="bibr" rid="ref8">8</xref>]. Similarly, in another study, colposcopists’ clinical diagnostic abilities were assessed before and after intensive training [<xref ref-type="bibr" rid="ref9">9</xref>]. It was found that junior colposcopists had only 49.1% accuracy in classifying TZs. Despite the notable improvement, their accuracy remained below optimal levels at 68.6% following training. In addition, a comparable study conducted in Europe demonstrated that junior colposcopists were only able to detect TZs with a 55% accuracy rate [<xref ref-type="bibr" rid="ref25">25</xref>]. According to these findings, colposcopists at less-experienced levels generally exhibited inadequate colposcopic performance. However, AI might offer a promising solution to enhance colposcopic capabilities and clinical decision-making confidence. Compared to these studies, our TZ classification model demonstrated strong performance with a high accuracy of 83.97% in the test set and achieved much higher sensitivity in predicting three TZ types in the validation study. Based on the results of previous studies, our model was more effective at stand-alone classification than that of junior colposcopists. Therefore, the method presented in this study accurately identifies TZ in colposcopy images, providing a valuable reference for colposcopists when making clinical decisions. The findings from this study supported the potential of the proposed AI-based TZ identification method as a promising adjunct tool for colposcopic examinations, particularly when integrated with AI colposcopic diagnostic systems and digital colposcopes. 
Dynamic digital imaging with AI assistance enhances the objectivity of colposcopic examinations and might address the diagnostic subjectivity of less experienced colposcopists. When AI-guided digital colposcopes are deployed in resource-limited health care settings, colposcopists will be able to receive intuitive and accessible guidance on clinical features from AI during the colposcopic examination in real time, supporting less-experienced colposcopists in improving their overall colposcopy skills. Combining AI results with colposcopist assessments helps reduce diagnostic bias, improve colposcopic examination capability, and narrow the gap with resource-rich areas. It addresses the minimization of missed diagnoses, the facilitation of early detection of lesions, the reduction of the risk of CIN progression, and the reduction of the burden of cervical cancer [<xref ref-type="bibr" rid="ref26">26</xref>]. The application of AI will be integral to improving the quality of colposcopy services in low-resource settings.</p>
        <p>The development and application research of AI-guided colposcopy models has emphasized their auxiliary value in clinical practice [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. With the advent of innovations in AI algorithms, these models rely predominantly on mainstream CNNs coupled with transfer learning, as a method of unsupervised learning for image feature learning [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. In some AI models, object detection is used as a metric for identifying lesion areas; however, it remains limited to binary classification tasks. Nevertheless, the multiclassification of normal cervix, low-grade squamous intraepithelial lesion, and high-grade squamous intraepithelial lesion plus poses significant challenges to these models. A clinical perspective suggested that acetowhite changes within TZ have a higher likelihood of association with CIN lesions than those outside of TZ. While the lesion extends across the new SCJ, a biopsy should be taken within the TZ or at the new SCJ. Therefore, by incorporating the TZ-type information as a weight in the model, the AI could be able to perform a more effective feature engineering process within the TZ. It is anticipated that the AI model will markedly enhance the capability of lesion detection, thus eliminating the current limitation in which precise identification is confined to CIN2+ lesions. Our proposed AI system delineated the new SCJ on high-resolution colposcopy images, and it enabled the approximate location of the TZ to be visualized more intuitively, thereby guiding more effective colposcopy examination and biopsy procedures.</p>
        <p>For applying AI techniques in limited-resource settings, we proposed a model based on the MobileNetV3 architecture for cervical TZ classification in colposcopy images. Since Google introduced MobileNet in 2017 [<xref ref-type="bibr" rid="ref30">30</xref>] as a lightweight CNN architecture, it has gained significant attention for its efficiency and accuracy. A lightweight AI model is more energy efficient and requires significantly fewer computational resources than a large-scale AI model, aligning with sustainable AI practices. It has been designed to operate efficiently on battery-operated devices, making it especially suitable for deployment in remote regions with limited power availability and internet access. By minimizing the reliance on high-cost hardware and extensive cloud infrastructure, lightweight AI models enable resource optimization in low-resource settings and facilitate equitable access to AI. Our AI model achieved efficient computation and robust classification performance on portable devices and can be applied actively in a variety of clinical settings to validate its generalization ability. A further economic evaluation is required to support the decision to adopt novel technologies in screening strategies.</p>
      </sec>
      <sec>
        <title>Limitations and Future Directions</title>
        <p>There are several limitations to this study. First, the scale of the dataset we included was limited, which restricted the ability to support training deeper or more complex deep neural networks. However, the quality and standardization of image acquisition were assured, and the appropriate network depth was chosen to maximize feature extraction. It is necessary to obtain more high-quality colposcopy images with endocervix expansion by auxiliary instruments per case to train more complex networks that can extract additional features and potentially improve classification accuracy in TZ2 and TZ3. To address the suboptimal performance in TZ2 classification, expanding the dataset for TZ2 in future studies could enable the model to capture more distinct texture features that differentiate it from TZ1 and TZ3. The model could be further enhanced by applying edge detection techniques in regions where it is difficult to distinguish TZ2 from TZ3 and to highlight subtle morphological changes at the new SCJ. Second, the external validation was limited to evaluating the AI results alone. Further research is needed to assess the impact of AI-aided colposcopists in the classification of TZ. A prospective clinical trial is further needed to investigate whether the method can be applied to real-world colposcopy clinics. In addition, although we collected the validation datasets from two different hospitals, the scale of the external validation dataset is relatively limited, particularly when it comes to representing clinical scenarios that involve different colposcope vendors and lighting conditions. Future studies will focus on expanding both the size and scope of the dataset, incorporating data from more regions for more comprehensive validation. Furthermore, transfer learning or domain adaptation techniques may be used to improve the model’s robustness and adaptability to various imaging conditions. 
Finally, the TZ classification lacks an absolute objective gold standard but is guided by colposcopic examination criteria and expert consensus. However, the colposcopy expert panel from tertiary hospitals determined the “ground truth” categories for this study in accordance with established guidelines from the IFCPC. In light of the subjectivity inherent in TZ classification, using only single-image modality data for model development is limited. Model optimization in the future may implement multimodal learning, which incorporates image data with associated clinical factors (eg, age, menstrual status, gravidity, and parity) to reduce the subjectivity of the model and enhance TZ feature discrimination. Furthermore, the construction of a knowledge graph based on existing consensus and colposcopic examination guidelines will help the model adhere to various rules or logic during the learning process.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>In this study, an accurate AI-based method was developed and evaluated for automatically identifying cervical TZ using colposcopy images. The proposed method was the first application of a lightweight CNN for cervical feature extraction and applied a general segmentation model for TZ delineation among multicenter images, achieving commendable classification accuracy on TZ. The proposed approach has the potential to adapt to various colposcopy clinical environments and improve AI-guided colposcopy practice.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CIN</term>
          <def>
            <p>cervical intraepithelial neoplasia</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">ECC</term>
          <def>
            <p>endocervical curettage</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">IFCPC</term>
          <def>
            <p>International Federation for Cervical Pathology and Colposcopy</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">mAP50</term>
          <def>
            <p>mean average precision at 50% Intersection over Union</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">NPV</term>
          <def>
            <p>negative predictive value</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">SCJ</term>
          <def>
            <p>squamocolumnar junction</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">SOTA</term>
          <def>
            <p>state-of-the-art</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">TZ</term>
          <def>
            <p>transformation zone</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>We acknowledge Haiyan Hu (Shenzhen Maternity and Child Healthcare Hospital), Zhilian Wang (Second Hospital of Shanxi Medical University), Jinfeng Liu (Gansu Maternity and Child Healthcare Hospital), and Jun Lei (Jiangxi Maternity and Child Healthcare Hospital) for assisting with colposcopy image collection and manual labeling. We thank Ms Qianling Dai (Chengdu Women’s and Children’s Central Hospital), and Ms Haiyan Hu for quality control. We also thank Ms Qianling Dai and Ms Mingyang Chen (Chinese Academy of Medical Sciences and Peking Union Medical College) for their comments on the original manuscript. This work was supported by the CAMS Innovation Fund for Medical Sciences (CIFMS 2021-I2M-1-004), the National Natural Science Foundation of China (82404286), the China Postdoctoral Science Foundation (2023M740323), and the Postdoctoral Fellowship Program of CPSF (GZB20230076).</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The datasets generated or analyzed during this study are not publicly available due to data-sharing policies set by the funding institution but are available from the corresponding author on reasonable request.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>TW and YQ contributed to conceptualization. TW was responsible for writing—original draft, methodology, software, and formal analysis. TW and YW conducted data analysis and visualization. YW and PX were responsible for data curation and writing—review and editing. XC and YQ contributed to supervision. YQ and PX handled funding acquisition.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bray</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Laversanne</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sung</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ferlay</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Siegel</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Soerjomataram</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Jemal</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Global cancer statistics 2022: GLOBOCAN estimates of incidence and mortality worldwide for 36 cancers in 185 countries</article-title>
          <source>CA Cancer J Clin</source>
          <year>2024</year>
          <volume>74</volume>
          <issue>3</issue>
          <fpage>229</fpage>
          <lpage>263</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://onlinelibrary.wiley.com/doi/10.3322/caac.21834"/>
          </comment>
          <pub-id pub-id-type="doi">10.3322/caac.21834</pub-id>
          <pub-id pub-id-type="medline">38572751</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ogilvie</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Nakisige</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Huh</surname>
              <given-names>WK</given-names>
            </name>
            <name name-style="western">
              <surname>Mehrotra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Franco</surname>
              <given-names>EL</given-names>
            </name>
            <name name-style="western">
              <surname>Jeronimo</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Optimizing secondary prevention of cervical cancer: recent advances and future challenges</article-title>
          <source>Int J Gynaecol Obstet</source>
          <year>2017</year>
          <volume>138 Suppl 1</volume>
          <fpage>15</fpage>
          <lpage>19</lpage>
          <pub-id pub-id-type="doi">10.1002/ijgo.12187</pub-id>
          <pub-id pub-id-type="medline">28691338</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leeson</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Alibegashvili</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Arbyn</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bergeron</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Carriero</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mergui</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Nieminen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Prendiville</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Redman</surname>
              <given-names>CWE</given-names>
            </name>
            <name name-style="western">
              <surname>Rieck</surname>
              <given-names>GC</given-names>
            </name>
            <name name-style="western">
              <surname>Quaas</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Petry</surname>
              <given-names>KU</given-names>
            </name>
          </person-group>
          <article-title>The future role for colposcopy in Europe</article-title>
          <source>J Low Genit Tract Dis</source>
          <year>2014</year>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>70</fpage>
          <lpage>78</lpage>
          <pub-id pub-id-type="doi">10.1097/LGT.0b013e318286b899</pub-id>
          <pub-id pub-id-type="medline">23774077</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>How can China achieve WHO's 2030 targets for eliminating cervical cancer?</article-title>
          <source>BMJ</source>
          <year>2024</year>
          <volume>386</volume>
          <fpage>e078641</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.bmj.com/lookup/pmidlookup?view=long&#38;pmid=39214542"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj-2023-078641</pub-id>
          <pub-id pub-id-type="medline">39214542</pub-id>
          <pub-id pub-id-type="pmcid">PMC11359838</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mondal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sur</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Woodworth</surname>
              <given-names>CD</given-names>
            </name>
          </person-group>
          <article-title>Establishment and optimization of epithelial cell cultures from human ectocervix, transformation zone, and endocervix optimization of epithelial cell cultures</article-title>
          <source>J Cell Physiol</source>
          <year>2019</year>
          <volume>234</volume>
          <issue>6</issue>
          <fpage>7683</fpage>
          <lpage>7694</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30609028"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/jcp.28049</pub-id>
          <pub-id pub-id-type="medline">30609028</pub-id>
          <pub-id pub-id-type="pmcid">PMC6395493</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Prendiville</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Sankaranarayanan</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <source>Colposcopy and Treatment of Cervical Precancer</source>
          <year>2017</year>
          <publisher-loc>Lyon, France</publisher-loc>
          <publisher-name>International Agency for Research on Cancer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Seery</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Endocervical curettage for diagnosing high-grade squamous intraepithelial lesions or worse in women with type 3 transformation zone lesions: a retrospective, observational study</article-title>
          <source>BMC Womens Health</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>245</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcwomenshealth.biomedcentral.com/articles/10.1186/s12905-023-02297-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12905-023-02297-0</pub-id>
          <pub-id pub-id-type="medline">37161558</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12905-023-02297-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC10170824</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Seery</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Assessing colposcopy competencies in medically underserved communities: a multi-center study in China</article-title>
          <source>BMC Cancer</source>
          <year>2024</year>
          <volume>24</volume>
          <issue>1</issue>
          <fpage>349</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmccancer.biomedcentral.com/articles/10.1186/s12885-024-12106-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12885-024-12106-y</pub-id>
          <pub-id pub-id-type="medline">38504211</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12885-024-12106-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC10949713</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Seery</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Enhancing colposcopy training using a widely accessible digital education tool in China</article-title>
          <source>Am J Obstet Gynecol</source>
          <year>2023</year>
          <volume>229</volume>
          <issue>5</issue>
          <fpage>538.e1</fpage>
          <lpage>538.e9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ajog.2023.07.043</pub-id>
          <pub-id pub-id-type="medline">37516400</pub-id>
          <pub-id pub-id-type="pii">S0002-9378(23)00515-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>An</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>HW</given-names>
            </name>
            <name name-style="western">
              <surname>Min</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JK</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>NW</given-names>
            </name>
          </person-group>
          <article-title>Pivotal clinical study to evaluate the efficacy and safety of assistive artificial intelligence-based software for cervical cancer diagnosis</article-title>
          <source>J Clin Med</source>
          <year>2023</year>
          <volume>12</volume>
          <issue>12</issue>
          <fpage>4024</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=jcm12124024"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/jcm12124024</pub-id>
          <pub-id pub-id-type="medline">37373717</pub-id>
          <pub-id pub-id-type="pii">jcm12124024</pub-id>
          <pub-id pub-id-type="pmcid">PMC10298986</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Abulizi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Tuerxun</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Rezhake</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in colposcopic examination: a promising tool to assist junior colposcopists</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2023</year>
          <volume>10</volume>
          <fpage>1060451</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37056736"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2023.1060451</pub-id>
          <pub-id pub-id-type="medline">37056736</pub-id>
          <pub-id pub-id-type="pmcid">PMC10088560</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>The application of deep learning based diagnostic system to cervical squamous intraepithelial lesions recognition in colposcopy images</article-title>
          <source>Sci Rep</source>
          <year>2020</year>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>11639</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-020-68252-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-020-68252-3</pub-id>
          <pub-id pub-id-type="medline">32669565</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-020-68252-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC7363819</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Diagnosis of cervical precancerous lesions based on multimodal feature changes</article-title>
          <source>Comput Biol Med</source>
          <year>2021</year>
          <volume>130</volume>
          <fpage>104209</fpage>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.104209</pub-id>
          <pub-id pub-id-type="medline">33440316</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(21)00003-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>MTA</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Development and validation of an artificial intelligence system for grading colposcopic impressions and guiding biopsies</article-title>
          <source>BMC Med</source>
          <year>2020</year>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>406</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedicine.biomedcentral.com/articles/10.1186/s12916-020-01860-y"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12916-020-01860-y</pub-id>
          <pub-id pub-id-type="medline">33349257</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12916-020-01860-y</pub-id>
          <pub-id pub-id-type="pmcid">PMC7754595</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>ZH</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>YL</given-names>
            </name>
          </person-group>
          <article-title>GRAND: A large-scale dataset and benchmark for cervical intraepithelial neoplasia grading with fine-grained lesion description</article-title>
          <source>Med Image Anal</source>
          <year>2021</year>
          <volume>70</volume>
          <fpage>102006</fpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2021.102006</pub-id>
          <pub-id pub-id-type="medline">33690025</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(21)00052-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Redmon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Divvala</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Girshick</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Farhadi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>You only look once: unified, real-time object detection</article-title>
          <source>arXiv. Preprint posted online on June 8, 2015</source>
          <pub-id pub-id-type="doi">10.48550/arXiv.1506.02640</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Distance-IoU loss: faster and better learning for bounding box regression</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the AAAI Conference on Artificial Intelligence</conf-name>
          <conf-date>February 7-12, 2020</conf-date>
          <conf-loc>New York, NY</conf-loc>
          <pub-id pub-id-type="doi">10.1609/aaai.v34i07.6999</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Squeeze-and-excitation networks</article-title>
          <year>2018</year>
          <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 18-23, 2018</conf-date>
          <conf-loc>Salt Lake City, UT</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2018.00745</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>An</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Fast segment anything</article-title>
          <source>arXiv. Preprint posted online on June 21, 2023</source>
          <pub-id pub-id-type="doi">10.48550/arXiv.2306.12156</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Torralba</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Russell</surname>
              <given-names>BC</given-names>
            </name>
            <name name-style="western">
              <surname>Yuen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>LabelMe: online image annotation and applications</article-title>
          <source>Proceedings of the IEEE</source>
          <year>2010</year>
          <volume>98</volume>
          <issue>8</issue>
          <fpage>1467</fpage>
          <lpage>1484</lpage>
          <pub-id pub-id-type="doi">10.1109/jproc.2010.2050290</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Seery</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Improving colposcopic accuracy for cervical precancer detection: a retrospective multicenter study in China</article-title>
          <source>BMC Cancer</source>
          <year>2022</year>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>388</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmccancer.biomedcentral.com/articles/10.1186/s12885-022-09498-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12885-022-09498-0</pub-id>
          <pub-id pub-id-type="medline">35399061</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12885-022-09498-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC8994905</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bornstein</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bentley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bösze</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Girardi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Haefner</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Menton</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Perrotta</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Prendiville</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Russell</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sideri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Strander</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tatti</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Torne</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Walker</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>2011 colposcopic terminology of the International Federation for Cervical Pathology and Colposcopy</article-title>
          <source>Obstet Gynecol</source>
          <year>2012</year>
          <volume>120</volume>
          <issue>1</issue>
          <fpage>166</fpage>
          <lpage>172</lpage>
          <pub-id pub-id-type="doi">10.1097/AOG.0b013e318254f90c</pub-id>
          <pub-id pub-id-type="medline">22914406</pub-id>
          <pub-id pub-id-type="pii">00006250-201207000-00026</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dash</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sethy</surname>
              <given-names>PK</given-names>
            </name>
            <name name-style="western">
              <surname>Behera</surname>
              <given-names>SK</given-names>
            </name>
          </person-group>
          <article-title>Cervical transformation zone segmentation and classification based on improved Inception-ResNet-V2 using colposcopy images</article-title>
          <source>Cancer Inform</source>
          <year>2023</year>
          <volume>22</volume>
          <fpage>11769351231161477</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/11769351231161477?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub++0pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/11769351231161477</pub-id>
          <pub-id pub-id-type="medline">37008072</pub-id>
          <pub-id pub-id-type="pii">10.1177_11769351231161477</pub-id>
          <pub-id pub-id-type="pmcid">PMC10064461</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A deep learning-based method for cervical transformation zone classification in colposcopy images</article-title>
          <source>Technol Health Care</source>
          <year>2023</year>
          <volume>31</volume>
          <issue>2</issue>
          <fpage>527</fpage>
          <lpage>538</lpage>
          <pub-id pub-id-type="doi">10.3233/THC-220141</pub-id>
          <pub-id pub-id-type="medline">36093645</pub-id>
          <pub-id pub-id-type="pii">THC220141</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sabrina</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ilkka</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mervi</surname>
              <given-names>HN</given-names>
            </name>
            <name name-style="western">
              <surname>Charles</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Simon</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ameli</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Esther</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Maria</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Eeva</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Pekka</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Fostering prevention of cervical cancer by a correct diagnosis of precursors: a structured case-based colposcopy course in Finland, Norway and UK</article-title>
          <source>Cancers (Basel)</source>
          <year>2020</year>
          <volume>12</volume>
          <issue>11</issue>
          <fpage>3201</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=cancers12113201"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/cancers12113201</pub-id>
          <pub-id pub-id-type="medline">33143157</pub-id>
          <pub-id pub-id-type="pii">cancers12113201</pub-id>
          <pub-id pub-id-type="pmcid">PMC7692698</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Hu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>You</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Estimation of economic burden throughout course of cervical squamous intraepithelial lesion and cervical cancer in China: a nationwide multicenter cross-sectional study</article-title>
          <source>Chin J Cancer Res</source>
          <year>2023</year>
          <volume>35</volume>
          <issue>6</issue>
          <fpage>675</fpage>
          <lpage>685</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38204443"/>
          </comment>
          <pub-id pub-id-type="doi">10.21147/j.issn.1000-9604.2023.06.11</pub-id>
          <pub-id pub-id-type="medline">38204443</pub-id>
          <pub-id pub-id-type="pii">cjcr-35-6-675</pub-id>
          <pub-id pub-id-type="pmcid">PMC10774142</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bao</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Pang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Turic</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The artificial intelligence-assisted cytology diagnostic system in large-scale cervical cancer screening: a population-based cohort study of 0.7 million women</article-title>
          <source>Cancer Med</source>
          <year>2020</year>
          <volume>9</volume>
          <issue>18</issue>
          <fpage>6896</fpage>
          <lpage>6906</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32697872"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/cam4.3296</pub-id>
          <pub-id pub-id-type="medline">32697872</pub-id>
          <pub-id pub-id-type="pmcid">PMC7520355</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chandran</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sumithra</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Karthick</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>George</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Deivakani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Elakkiya</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Subramaniam</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Manoharan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Diagnosis of cervical cancer based on ensemble deep learning network using colposcopy images</article-title>
          <source>Biomed Res Int</source>
          <year>2021</year>
          <volume>2021</volume>
          <fpage>5584004</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1155/2021/5584004"/>
          </comment>
          <pub-id pub-id-type="doi">10.1155/2021/5584004</pub-id>
          <pub-id pub-id-type="medline">33997017</pub-id>
          <pub-id pub-id-type="pmcid">PMC8112909</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>YM</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>PZ</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ruan</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>MDFI: Multi-CNN decision feature integration for diagnosis of cervical precancerous lesions</article-title>
          <source>IEEE Access</source>
          <year>2020</year>
          <volume>8</volume>
          <fpage>29616</fpage>
          <lpage>29626</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2020.2972610</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>AG</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kalenichenko</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Weyand</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Andreetto</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Adam</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>MobileNets: efficient convolutional neural networks for mobile vision applications</article-title>
          <source>arXiv. Preprint posted online on April 17, 2017</source>
          <pub-id pub-id-type="doi">10.48550/arXiv.1704.04861</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
