#!/usr/bin/env python3
"""
python pwb.py updatewin -f:"kucosmetics.py" -s:"fix"

This is a fork of the master cosmetic_changes.py that ships with pywikibot. The script can run standalone using [[Bikarhêner:Balyozxane/skrîpt/py/kuCosmeticsRun.py]], which does not save the page if only whitespace changes are detected.

Added:

fixVrefNames --> renames VisualEditor-generated ref names (:0, :1, etc.)

replaceDeprecatedTemplates --> changes redirected templates to their target templates using a JSON file ([[Bikarhêner:Balyozxane/skrîpt/py/listeyasablonan.py]]).

fixOthers --> applies a few standard changes for ku.wiki

replaceDeprecatedParams --> uses WP:AutoWikiBrowser/Rename template parameters to replace deprecated/English parameters

removeDupeCats --> removes duplicate categories

fixAgahidankSpace --> standardizes the spacing of parameters inside Agahîdank templates

removeSelfCat --> removes the self-referencing category link from category pages

fixPunctAfterTempl --> moves punctuation that follows templates such as 'çavkanî hewce ye' to before the template

fixMainCat --> adds the main category from Wikidata or creates one matching the page title
"""
import re
import mytools
from enum import IntEnum
from typing import Any, Union, Tuple
from urllib.parse import urlparse, urlunparse
import requests
import json
from bs4 import BeautifulSoup
import mwparserfromhell
import string
import pywikibot
from pywikibot import textlib
from pywikibot.backports import Callable, Dict, List, Match, Pattern
from pywikibot.exceptions import InvalidTitleError
from pywikibot.textlib import FILE_LINK_REGEX
from pywikibot.tools import first_lower, first_upper
from pywikibot.tools.chars import url2string

try:
    import stdnum.isbn as stdnum_isbn
except ImportError:
    stdnum_isbn = None

# Subpage templates. Must be in lower case,
# whereas subpage itself must be case sensitive
moved_links = {
    'ku': (['documentation', 'belgekirin'], '/belge'),
}

VERBOSE = False
TESTING = False


class CANCEL(IntEnum):
    """Cancel level used to ignore exceptions.

    Determines what is skipped when an error occurs: the whole page,
    the current method, or a single match. ALL raises the exception
    instead.

    .. versionadded:: 6.3
    """

    ALL = 0
    PAGE = 1
    METHOD = 2
    MATCH = 3


def _format_isbn_match(match: Match[str], strict: bool = True) -> str:
    """Helper function to validate and format a single matched ISBN."""
    if not stdnum_isbn:
        raise NotImplementedError(
            'ISBN functionality not available. Install stdnum package.')

    isbn = match['code']
    try:
        stdnum_isbn.validate(isbn)
    except stdnum_isbn.ValidationError as e:
        if strict:
            raise
        pywikibot.log(f'ISBN "{isbn}" validation error: {e}')
        return isbn

    return stdnum_isbn.format(isbn)


def _reformat_ISBNs(text: str, strict: bool = True) -> str:
    """Helper function to normalise ISBNs in text.

    :raises Exception: Invalid ISBN encountered when strict enabled
    """
    return textlib.reformat_ISBNs(
        text, lambda match: _format_isbn_match(match, strict=strict))


class PageTagger:
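    """Helpers that classify pages (orphan, stub) for maintenance tagging."""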

    @staticmethod
    def isSewi(page: pywikibot.page.BasePage) -> bool:
        """
        Checks if a page is sêwî or not

        :param page: pywikibot.page.BasePage
        :return: True if sêwî else False.
        """
        incoming_links = list(page.getReferences(namespaces=[0]))

        if len(incoming_links) < 1:
            return True

        for link in incoming_links:

            # If the only incoming link is the page itself, treat it as an orphan
            if link.title() == page.title() and len(incoming_links) == 1:
                return True

            bi_zaravayen_din = mytools.zaravayen_din(link.categories())

            # Not an orphan if the linking page is not in an other-dialect (zarava) category, not a redirect, and not a disambiguation page
            if not bi_zaravayen_din and not link.isRedirectPage() and not link.isDisambig():
                return False

        # If all incoming links are redirects or disambiguations, it is an orphan
        return True

    @staticmethod
    def isSitil(page: pywikibot.page.BasePage) -> bool:
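        """Check whether a page is a stub (şitil) by its rendered prose size.

        An article whose paragraphs contain at most 300 words in total is
        treated as a stub.
        """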
        html_content = page.get_parsed_page()
        soup = BeautifulSoup(html_content, 'html.parser')
        word_count = 0

        # Count the words of prose in each paragraph
        for paragraph in soup.find_all('p'):
            paragraph_text = paragraph.get_text()
            # get_text() already drops tags; strip any leftover markup remnants
            clean_content = re.sub(r'<[^>]+>', '', paragraph_text)
            words = clean_content.split()
            word_count += len(words)

        return word_count <= 300


class CosmeticChangesToolkit:
    """Cosmetic changes toolkit.

    .. versionchanged:: 7.0
       `from_page()` method was removed
    """

    def __init__(self, page: 'pywikibot.page.BasePage', *,
                 show_diff: bool = False,
                 ignore: IntEnum = CANCEL.ALL) -> None:
        """Initializer.

        .. versionchanged:: 5.2
           instantiate the CosmeticChangesToolkit from a page object;
           only allow keyword arguments except for page parameter;
           `namespace` and `pageTitle` parameters are deprecated

        .. versionchanged:: 7.0
           `namespace` and `pageTitle` parameters were removed

        :param page: the Page object containing the text to be modified
        :param show_diff: show difference after replacements
        :param ignore: ignores if an error occurred and either skips the page
            or only that method. It can be set one of the CANCEL constants
        """
        if page.site.sitename != 'wikipedia:ku':
            raise ValueError("This script should only be used on ku:wikipedia")

        self.site = page.site
        self.current_page = page
        self.title = page.title()
        self.namespace = page.namespace()

        self.show_diff = show_diff
        self.template = (self.namespace == 10)
        self.talkpage = self.namespace >= 0 and self.namespace % 2 == 1
        self.ignore = ignore
        self.summaries = {}

        if self.namespace == 0:
            self.is_disambig = page.isDisambig()
            self.gotara_zaravayan = mytools.zaravayen_din(page.categories())

            self.contains_sewi_cat = mytools.is_category_in_page(self.current_page, 'Kategorî:Hemû gotarên sêwî')

            self.contains_sitil_cat = mytools.is_category_in_page(self.current_page, 'Kategorî:Hemû şitil')

            self.is_sewi = PageTagger.isSewi(page)
            self.is_sitil = PageTagger.isSitil(page)

        self.common_methods = [

            self.replaceDeprecatedTemplates,

            self.addOrphanTag,
            self.removeOrphanTag,
            self.addMultIssues,
            self.fixCudakirinPlace,
            self.addStubTag,
            self.removeStubTag,

            self.fixSelfInterwiki,
            self.fixMainCat,
            self.standardizePageFooter,
            self.fixSyntaxSave,
            self.cleanUpLinks,
            self.cleanUpSectionHeaders,
            self.putSpacesInLists,
            self.translateAndCapitalizeNamespaces,
            self.translateMagicWords,
            self.resolveHtmlEntities,
            self.removeUselessSpaces,
            self.removeNonBreakingSpaceBeforePercent,

            self.fixHtml,
            self.fixReferences,
            self.fixVrefNames,
            self.fixStyle,
            self.fixTypo,
            self.fixOthers,
            self.replaceDeprecatedParams,
            self.removeDupeCats,
            self.removeDupeParam,
            self.fixAgahidankSpace,
            self.removeSelfCat,
            self.fixPunctAfterTempl
        ]
        if stdnum_isbn:
            self.common_methods.append(self.fix_ISBN)

    # Define the explanation for each method
    method_explanations = {
        'addOrphanTag': 'Şablona {{[[Şablon:Sêwî|Sêwî]]}} lê zêde kir',
        'removeOrphanTag': 'Şablona {{[[Şablon:Sêwî|Sêwî]]}} rakir',
        'addMultIssues': 'Destpêka rûpelê standard kir',
        'fixCudakirinPlace': 'Şablona cudakirinê xist herî jor',
        'addStubTag': 'Şablona {{[[Şablon:şitil|Şitil]]}} lê zêde kir',
        'removeStubTag': 'Şablona {{[[Şablon:şitil|Şitil]]}} rakir',

        'fixSelfInterwiki': 'Lînkên înterwîkî sererast kir',
        'fix_ISBN': 'ISBN sererast kir',
        'fixMainCat': 'Kategoriya sereke lê zêde kir',
        'standardizePageFooter': 'Binê rûpelê standard kir',
        'fixSyntaxSave': 'Xeletiyên sentaksê sererast kir',
        'cleanUpLinks': 'Lînk paqij kir',
        'cleanUpSectionHeaders': 'Valahiya beşan sererast kir',
        'putSpacesInLists': 'Valahî li lîsteyan zêde kir',
        'translateAndCapitalizeNamespaces': 'Valahiya nav tercime û mezin kir',
        'translateMagicWords': 'Kelîmeyên sihirî tercime kir',
        'replaceDeprecatedTemplates': 'Şablonên beralîkirî guhart',
        'resolveHtmlEntities': 'HTML sererast kir',
        'removeUselessSpaces': 'Valahiyên ne hewce jê bir',
        'removeNonBreakingSpaceBeforePercent': 'Valahiya beriya sedî jê bir',
        'fixHtml': 'Xeletiyên HTMLê sererast kir',
        'fixReferences': 'Referans sererast kir',
        'fixVrefNames': 'Navên referansan sererast kir',
        'fixStyle': 'Xeletiyên stîlê sererast kir',
        'fixTypo': 'Valahiyê sererast kir',
        'fixOthers': 'Sernavên beşan sererast kir',
        'replaceDeprecatedParams': 'Parametreyên îngilîzî/xelet sererast kir',
        'removeDupeCats': 'Kategoriya ducarî jê bir',
        'fixAgahidankSpace': 'Valahiya di agahîdankê de standard kir',
        'removeSelfCat': 'Kategoriya li ser xwe jê bir',
        'removeDupeParam': 'Parametreya ducarî ya vala jê bir',
        'fixPunctAfterTempl': 'Niqteşanî piştî şablonê sererast kir'
    }

    def safe_execute(self, method: Callable[[str], str], text: str) -> str:
        """Execute the method and catch exceptions if enabled."""
        result = None
        try:
            result = method(text)
        except Exception as e:
            if self.ignore == CANCEL.METHOD:
                pywikibot.warning('Unable to perform "{}" on "{}"!'
                                  .format(method.__name__, self.title))
                pywikibot.error(e)
            else:
                raise
        return text if result is None else result

    def _check_modification(self, method_name: str, old_text: str, new_text: str) -> None:
        """Check if the text is modified by a method and generate a summary."""
        if old_text != new_text:
            summary = self.method_explanations.get(method_name, 'sererastkirinên din')
            self.summaries[method_name] = summary

    def _change(self, text: str) -> str:
        """Execute all clean up methods."""
        modified_text = text
        for method in self.common_methods:
            old_text = modified_text
            modified_text = self.safe_execute(method, modified_text)
            self._check_modification(method.__name__, old_text, modified_text)
        return modified_text

    def change(self, text: str) -> Tuple[str, Dict[Any, Any]]:
        """Execute all clean up methods and catch errors if activated."""
        try:
            new_text = self._change(text)
        except Exception as e:
            if self.ignore == CANCEL.PAGE:
                pywikibot.warning('Skipped "{}", because an error occurred.'
                                  .format(self.title))
                pywikibot.error(e)
                return "", {}  # Return empty string and empty dictionary
            raise
        else:
            if self.show_diff:
                pywikibot.showDiff(text, new_text)
            return new_text, self.summaries

    def get_main_cat(self, title: str) -> Union[dict, None]:
        """Get the P910 value from Wikidata for the given page."""

        # Construct the Wikidata API URL
        wikidata_api_url = 'https://www.wikidata.org/w/api.php'
        params = {
            'action': 'wbgetentities',
            'sites': 'kuwiki',
            'titles': title,
            'props': 'claims|sitelinks',
            'format': 'json'
        }

        # Make the API request
        try:
            response = requests.get(wikidata_api_url, params=params)
            response.raise_for_status()  # Raise an exception for bad responses
        except requests.exceptions.RequestException as e:
            print(f"Error fetching data from Wikidata: {e}")
            return None

        data = response.json()

        # Check if the response contains the item ID
        entities = data.get('entities')
        if not entities:
            return None

        # Extract the item ID
        item_id = next(iter(entities))
        item_data = entities[item_id]

        # Check if the item has the P910 property
        claims = item_data.get('claims', {})
        P910_claims = claims.get('P910', [])
        if not P910_claims:
            return None

        # Get the target value from the claim
        P910_claim = P910_claims[0]
        mainsnak = P910_claim.get('mainsnak', {})
        datavalue = mainsnak.get('datavalue', {})
        value = datavalue.get('value', {})
        target_id = value.get('id')

        sitelinks = item_data.get('sitelinks', {})
        enwiki_page_dict = sitelinks.get('enwiki', None)

        # Check if enwiki_page_dict is None
        if enwiki_page_dict is None:
            return None

        enwiki_page = enwiki_page_dict.get('title', None)

        if target_id and enwiki_page:
            if VERBOSE:
                print(f"QID  main_Cat: {target_id}")
                print(f"enwiki_page for current page: {enwiki_page}")

            retr_links = mytools.get_sitelinks_qid(target_id, ['ku', 'en'])
            kuwiki_main = retr_links.get('kuwiki')
            enwiki_main = retr_links.get('enwiki')

            result = {}

            if kuwiki_main:
                result["kuwiki"] = kuwiki_main
                return result

            if enwiki_main and enwiki_main.replace('Category:', '') == enwiki_page:
                result["enwiki"] = enwiki_main
                return result

        return None

    def create_main(self, page, enwiki_page):
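        """Create 'Kategorî:<page title>' with an interwiki link to enwiki_page."""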
        new_cat_title = 'Kategorî:' + page.title()
        new_cat_page = pywikibot.Page(self.site, new_cat_title)

        if new_cat_page.exists():
            if VERBOSE:
                print('Kategorî jixwe heye. Dev jê berde.')
            return None

        page_text = '{{subst:bêkategorî}}\n{{standard-kat}}'
        page_text += f'\n\n[[en:{enwiki_page}]]'
        new_cat_page.text = page_text

        summary = f'[[User:Balyozxane/skrîpt/py/kuCosmeticsCore.py|Bot]]: Wekheva [[en:{enwiki_page}]] hat çêkirin'
        if not TESTING:
            new_cat_page.save(summary=summary)
        return pywikibot.Category(self.site, new_cat_title, sort_key=' ')

    def fixOthers(self, text: str) -> str:
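        """Standardize a few common section headings on ku.wiki."""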
        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        replacements = {
            r'==\s*[gG]ir[eê]dan[aêîi]n?\s+[Dd]erv(a|eyî|ê)\s*==': '== Girêdanên derve ==',
            r'==\s*Erdn[îi]garî?\s*==': '== Erdnîgarî ==',
            r'==\s*[Çç]ava?kanî\s*==': '== Çavkanî ==',
            r'==\s*Tûrîzm\s*==': '== Turîzm ==',
            r'==\s*[bB]in[eê]r[eê] [Jj]î\s*==': '== Binêre herwiha =='
        }

        for pattern, replacement in replacements.items():
            text = re.sub(pattern, replacement, text)

        return text

    def fixPunctAfterTempl(self, text: str) -> str:
        """
        Move punctuation that directly follows certain templates to just before the template.

        :param text: The input wiki text.
        :return: The modified wiki text.
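
        Example: ``Gotar{{Çavkanî hewce ye}}.`` becomes
        ``Gotar.{{Çavkanî hewce ye}}``.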
        """

        if self.namespace != 0:
            return text

        # Define punctuation marks
        punctuation_marks = [",", ".", ":", ";", "!", "?"]
        template_names = ['Çavkanî hewce ye', 'Ne kurdî-biçûk', 'Zelalkirin hewce ye']

        # Iterate over template names
        for template_name in template_names:
            # Define the pattern to match the template followed by punctuation
            pattern = rf'{{{{\s*{template_name}([^}}]+)?}}}}([{"".join(punctuation_marks)}])'

            # Define the replacement pattern
            replacement = f'\\2{{{{{template_name}\\1}}}}'

            # Perform the replacement using textlib
            text = textlib.replaceExcept(text, pattern, replacement, ['table'])

        return text

    def removeSelfCat(self, text: str) -> str:
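        """Remove a category page's own category link from itself.

        E.g. a ``[[Kategorî:Mînak]]`` link placed on the page
        ``Kategorî:Mînak`` itself is dropped (``Mînak`` is a hypothetical
        title, for illustration only).
        """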
        if self.namespace != 14:
            return text

        category_links = textlib.getCategoryLinks(text, site=self.site)

        # Construct new category links without self.title while preserving sortkeys
        new_category_links = []
        for category in category_links:
            if category.title() != self.title:
                sortkey = category.sortKey
                if sortkey:
                    new_category_links.append(f"{category.title()}|{sortkey}")
                else:
                    new_category_links.append(category.title())

        # Replace existing categories with new category links
        text = textlib.replaceCategoryLinks(text, new_category_links, site=self.site)

        return text

    def removeDupeCats(self, text: str) -> str:
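        """Remove duplicate category links.

        If the same category appears both with and without a sort key,
        the occurrence with a sort key is kept; otherwise the first
        occurrence wins.
        """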
        # Extract categories
        categories = textlib.getCategoryLinks(text, self.site)

        seen_categories = {}
        final_categories = []

        # Iterate through categories
        for category in categories:
            cat_title = category.title()
            cat_sortkey = category.sortKey

            if cat_title not in seen_categories:
                # Record the first occurrence of the category
                seen_categories[cat_title] = cat_sortkey
                final_categories.append(category)
            else:
                # Handle duplicate categories
                first_sortkey = seen_categories[cat_title]

                if not first_sortkey and not cat_sortkey:
                    # Skip the current category as it is a duplicate without a sortkey
                    continue

                # If the current category has a sortkey, we keep it and replace the first occurrence
                # if the first occurrence does not have a sortkey
                if not first_sortkey and cat_sortkey:
                    # Replace the first occurrence with the current one
                    final_categories = [cat for cat in final_categories if cat.title() != cat_title]
                    final_categories.append(category)
                    # Update the seen_categories with the new sortkey
                    seen_categories[cat_title] = cat_sortkey

        # Replace the categories in the text
        text = textlib.replaceCategoryLinks(text, final_categories, site=self.site)

        return text

    def removeDupeParam(self, text: str) -> str:
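        """Remove repeated template parameters whose repeated occurrence is empty."""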
        wikicode = mwparserfromhell.parse(text)

        for template in wikicode.filter_templates():
            params_seen = set()
            for param in template.params:
                param_name = str(param.name).strip()
                if param_name in params_seen and (not param.value.strip()):  # Check for empty values
                    template.remove(param)
                else:
                    params_seen.add(param_name)

        text = str(wikicode)
        return text

    def replaceDeprecatedParams(self, text: str) -> str:
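        """Rename deprecated/English template parameters.

        The mapping is read from ``parambikejson.json`` (derived from
        WP:AutoWikiBrowser/Rename template parameters).
        """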

        with open('parambikejson.json', encoding='utf-8') as f:
            alias_dict = json.load(f)

        wikicode = mwparserfromhell.parse(text)

        for template in wikicode.filter_templates():
            template_name = mytools.ucfirst(template.name)

            # Check if the capitalized template name is in alias_dict
            if template_name in alias_dict:
                params_to_replace = alias_dict[template_name]

                # Loop through each parameter in the template
                for param in template.params:
                    param_name = param.name.strip()

                    # Check if the parameter name needs replacing
                    if param_name in params_to_replace:
                        new_param_name = params_to_replace[param_name]
                        param.name = new_param_name

        text = str(wikicode)
        return text

    def fixAgahidankSpace(self, text: str) -> str:
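        """Standardize spacing inside ``Agahîdank`` (infobox) templates.

        Each parameter is put on its own line and parameter names are
        padded so that the ``=`` signs line up.
        """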
        if self.namespace != 0:
            return text

        wikicode = mwparserfromhell.parse(text)

        for template in wikicode.filter_templates():
            template_name = mytools.ucfirst(template.name)

            if template_name.startswith("Agahîdank"):
                if template.params:
                    # Iterate over the parameters and format them
                    for param in template.params:
                        # Calculate space padding based on the length of the parameter name
                        param_name_length = len(param.name.strip())
                        if param_name_length <= 17:
                            space_padding = " " * (18 - param_name_length) + " "
                        else:
                            space_padding = " "

                        # Add a line break after each parameter value
                        param.value = " " + param.value.strip() + "\n"

                        # Update parameter name with padding
                        param.name = " {}{}".format(param.name.strip(), space_padding)

                    # Add a line break after the template name
                    template.name = template.name.strip() + "\n"

        return str(wikicode)

    def replaceDeprecatedTemplates(self, text: str) -> str:
        """
        Rename redirected templates to their target names, using redirected_template_mappings.json for kuwiki.
        """

        # Load JSON file containing template name mappings
        with open('redirected_template_mappings.json', encoding='utf-8') as f:
            template_mappings = json.load(f)

        wikicode = mwparserfromhell.parse(text)

        # Iterate over each template in the parsed text
        for template in wikicode.filter_templates():
            old_name = mytools.ucfirst(template.name)
            # Check if the template name exists in the JSON mappings
            if old_name in template_mappings:
                new_name = template_mappings[old_name]["rd_title"]  # Get the new template name
                # Find the position of the old template name in template.name
                start_index = template.name.lower().find(old_name.lower())

                # Replace the old template name with the new one in template.name
                template.name = (template.name[:start_index] + new_name
                                 + template.name[start_index + len(old_name):])

        # Convert the modified wikicode back to text
        text = str(wikicode)
        return text

    def fixVrefNames(self, text: str) -> str:
        """
        taken from [[:en:User:Qwerfjkl/VEref.py]] which is itself taken
        from [[:en:User:Psiĥedelisto/VisualEditor ref namer.py]]
        The VisualEditor, (very annoyingly!), doesn't name references added by users, and gives them names like :0, :1, etc. This script fixes that automatically
        Changes some lower case template names to upper and vice versa
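
        Example: ``<ref name=":0">`` whose citation template has
        ``paşnav=Smith`` and ``sal=2020`` is renamed to
        ``<ref name="Smith2020">`` (hypothetical values, for illustration).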
        """
        if self.namespace != 0:
            return text

        parsed = mwparserfromhell.parse(text)
        tags = [t for t in parsed.ifilter(forcetype=mwparserfromhell.wikicode.Tag,
                                          matches=r"<\s*ref\s*", recursive=True)
                if t.has("name")]

        refs = list(
            filter(lambda s: re.search(r"^:\d+$", str(s.get("name").value))
                   and not re.search("/>$", str(s)), tags))

        pretty = dict()

        for ref in refs:
            template = ref.contents.get(0)
            if not isinstance(template, mwparserfromhell.nodes.Template):  # Check if template is a Template object
                continue
            if template.has("vauthors"):
                v = str(template.get("vauthors").value)
            elif template.has("authors"):
                v = str(template.get("authors").value)
            elif template.has("paşnav"):
                v = str(template.get("paşnav").value)
            elif template.has("pêşnav"):
                v = str(template.get("pêşnav").value)
            else:
                continue

            v = v.strip()

            if "," in v:
                last = v[:v.index(",")]
            elif " " in v:
                last = v[:v.index(" ")]
            else:
                last = v

            punctuation = set(string.punctuation)

            # Strip punctuation characters from the last word directly
            last = ''.join([char for char in last if char not in punctuation])

            if re.match(r'^[0-9\-.,]+$', last):
                last = False
            else:
                # Check if the last name contains Latin alphabet characters
                latin_alphabet = set(string.ascii_letters)
                if not any(char in latin_alphabet for char in last):
                    last = False

            date = False
            if template.has("tarîx"):
                date = str(template.get("tarîx").value)
            elif template.has("dîrok"):
                date = str(template.get("dîrok").value)
            elif template.has("sal"):
                date = str(template.get("sal").value)

            if date and last:
                match = re.search(r"\d{4}", date)
                if match:
                    date = match[0]
                    pretty[str(ref.get("name").value)] = "{}{}".format(last, date)

        for tag in parsed.ifilter(forcetype=mwparserfromhell.wikicode.Tag,
                                  matches=r"<\s*ref\s*", recursive=True):
            if not tag.has("name"):
                continue
            k = str(tag.get("name").value)
            if k in pretty:
                # Set the "name" attribute explicitly instead of assuming
                # it is the tag's first attribute
                tag.get("name").value = pretty[k]

        text = str(parsed)
        return text

    def fixSelfInterwiki(self, text: str) -> str:
        """
        Interwiki links to the site itself are displayed like local links.

        Remove their language code prefix.
        """
        if not self.talkpage and pywikibot.calledModuleName() != 'interwiki':
            interwikiR = re.compile(r'\[\[(?: *:)? *{} *: *([^\[\]\n]*)]]'
                                    .format(self.site.code))
            text = interwikiR.sub(r'[[\1]]', text)
        return text

    def fixMainCat(self, text: str) -> str:
        """
        Retrieve the main category from wikidata or create it if need be
        """
        assert self.title is not None

        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        categories = textlib.getCategoryLinks(text, site=self.site)
        new_text = text
        if categories:

            main = pywikibot.Category(self.site, 'Category:' + self.title,
                                      sort_key=' ')
            if main in categories:
                return text

            # Get main categories from Wikidata
            maincats = self.get_main_cat(self.title)
            if maincats:
                if VERBOSE:
                    print(f"maincats: {maincats}")
                kuwiki_link = maincats.get('kuwiki')
                enwiki_link = maincats.get('enwiki')

                if kuwiki_link:
                    if VERBOSE:
                        print(f"kuwiki Main cat found: {kuwiki_link}")
                    main = pywikibot.Category(self.site, kuwiki_link, sort_key=' ')
                    if main in categories:
                        categories.pop(categories.index(main))
                    categories.insert(0, main)

                if enwiki_link:
                    if VERBOSE:
                        print(f"enwiki Main cat found: {enwiki_link}")
                    new_main = self.create_main(self.current_page, enwiki_link)
                    if new_main:
                        categories.insert(0, new_main)

                new_text = textlib.replaceCategoryLinks(text, categories,
                                                        site=self.site)
        if ''.join(text.split()) != ''.join(new_text.split()):
            return new_text
        else:
            return text

    def standardizePageFooter(self, text: str) -> str:
        """
        Standardize page footer.

        Makes sure that interwiki links and categories are put
        into the correct position and into the right order. This
        combines the old instances of standardizeInterwiki
        and standardizeCategories.

        The page footer consists of the following parts
        in that sequence:
        1. categories
        2. additional information depending on the local site policy
        3. interwiki
        """
        assert self.title is not None

        categories = []
        interwiki_links = {}

        # get categories
        if not self.template:
            categories = textlib.getCategoryLinks(text, site=self.site)

        subpage = False
        if not self.talkpage:

            if self.template:
                try:
                    tmpl, loc = moved_links[self.site.code]
                    del tmpl
                except KeyError:
                    loc = None
                if loc is not None and loc in self.title:
                    subpage = True

            # get interwiki
            interwiki_links = textlib.getLanguageLinks(
                text, insite=self.site, template_subpage=subpage)

            # remove interwiki
            text = textlib.removeLanguageLinks(text, site=self.site)

        # add categories, main to top
        if categories:

            main = pywikibot.Category(self.site, 'Category:' + self.title,
                                      sort_key=' ')
            if main in categories:
                categories.pop(categories.index(main))
                categories.insert(0, main)

            # Sort categories in alphabetic order
            def kurdish_sort_key(category):

                # Assign each character in the category name its index in the Kurdish alphabet
                kurdish_alphabet = "abcçdeêfghiîjklmnopqrsştuûvwxyzABCÇDEÊFGHIÎJKLMNOPQRSŞTUÛVWXYZ"
                category_title = category.title()
                return tuple(
                    kurdish_alphabet.index(c) if c in kurdish_alphabet else float('inf') for c in category_title)

            categories.sort(key=kurdish_sort_key)

            text = textlib.replaceCategoryLinks(text, categories,
                                                site=self.site)

        # add interwiki
        if interwiki_links:
            text = textlib.replaceLanguageLinks(text, interwiki_links,
                                                site=self.site,
                                                template=self.template,
                                                template_subpage=subpage)

        return text

    def translateAndCapitalizeNamespaces(self, text: str) -> str:
        """Use localized namespace names.

        .. versionchanged:: 7.4
           No longer expect a specific namespace alias for File:
        """
        # wiki links aren't parsed here.
        exceptions = ['nowiki', 'comment', 'math', 'pre']

        for namespace in self.site.namespaces.values():
            if namespace == 0:
                # skip main (article) namespace
                continue
            # a clone is needed. Won't change the namespace dict
            namespaces = list(namespace)

            # final namespace variant
            final_ns = namespaces.pop(0)
            if namespace in (2, 3):
                # skip localized user namespace, maybe gender is used
                namespaces = ['User' if namespace == 2 else 'User talk']
            # lowerspaced and underscored namespaces
            for i, item in enumerate(namespaces):
                item = item.replace(' ', '[ _]')
                item = f'[{item[0]}{item[0].lower()}]' + item[1:]
                namespaces[i] = item
            namespaces.append(first_lower(final_ns))
            if final_ns and namespaces:
                text = textlib.replaceExcept(
                    text,
                    r'\[\[\s*({}) *:(?P<nameAndLabel>.*?)\]\]'
                    .format('|'.join(namespaces)),
                    fr'[[{final_ns}:\g<nameAndLabel>]]',
                    exceptions)
        return text

    def translateMagicWords(self, text: str) -> str:
        """Use localized magic words."""

        def init_cache() -> None:
            for magicword in ('img_thumbnail', 'img_left', 'img_center',
                              'img_right', 'img_none', 'img_framed',
                              'img_frameless', 'img_border', 'img_upright',
                              'img_baseline', 'img_sub', 'img_super',
                              'img_top', 'img_text_top', 'img_middle',
                              'img_bottom', 'img_text_bottom'):
                aliases = self.site.getmagicwords(magicword)
                if len(aliases) > 1:
                    cache.update((alias, aliases[0]) for alias in aliases[1:]
                                 if '$1' not in alias)
            if not cache:
                cache[False] = True  # signal there is nothing to replace

        def replace_magicword(match: Match[str]) -> str:
            if cache.get(False):
                return match.group()
            split = match.group().split('|')
            if len(split) == 1:
                return match.group()

            if not cache:
                init_cache()

            # push ']]' out and re-add below
            split[-1] = split[-1][:-2]
            return '{}|{}]]'.format(
                split[0], '|'.join(cache.get(x.strip(), x) for x in split[1:]))

        cache: Dict[Union[bool, str], Any] = {}
        exceptions = ['comment', 'nowiki', 'pre', 'syntaxhighlight']
        regex = re.compile(
            FILE_LINK_REGEX % '|'.join(self.site.namespaces[6]),
            flags=re.X)
        return textlib.replaceExcept(
            text, regex, replace_magicword, exceptions)

    def cleanUpLinks(self, text: str) -> str:
        """Tidy up wikilinks found in a string.

        This function will:

        * Replace underscores with spaces
        * Move leading and trailing spaces out of the wikilink and into the
          surrounding text
        * Convert URL-encoded characters into Unicode-encoded characters
        * Move trailing characters out of the link and make the link without
          using a pipe, if possible
        * Capitalize the article title of the link, if appropriate

        .. versionchanged:: 8.4
           Convert URL-encoded characters if a link is an interwiki link
           or different from main namespace.

        :param text: string to perform the clean-up on
        :return: text with tidied wikilinks
        """

        # helper function which works on one link and either returns it
        # unmodified, or returns a replacement.
        def handleOneLink(match: Match[str]) -> str:
            # Convert URL-encoded characters to str
            titleWithSection = url2string(match['titleWithSection'],
                                          encodings=self.site.encodings())
            label = match['label']
            trailingChars = match['linktrail']
            newline = match['newline']
            # entire link but convert URL-encoded text
            oldlink = url2string(match.group(),
                                 encodings=self.site.encodings())

            is_interwiki = self.site.isInterwikiLink(titleWithSection)
            if is_interwiki:
                return oldlink

            # The link looks like this:
            # [[page_title|link_text]]trailing_chars
            # We only work on namespace 0 because pipes and linktrails work
            # differently for images and categories.
            page = pywikibot.Page(pywikibot.Link(titleWithSection, self.site))
            try:
                in_main_namespace = page.namespace() == 0
            except InvalidTitleError:
                in_main_namespace = False
            if not in_main_namespace:
                return oldlink

            # Replace underlines by spaces, also multiple underlines
            titleWithSection = re.sub('_+', ' ', titleWithSection)
            # Remove double spaces
            titleWithSection = re.sub('  +', ' ', titleWithSection)
            # Remove unnecessary leading spaces from title,
            # but remember if we did this because we eventually want
            # to re-add it outside of the link later.
            titleLength = len(titleWithSection)
            titleWithSection = titleWithSection.lstrip()
            hadLeadingSpaces = len(titleWithSection) != titleLength
            hadTrailingSpaces = False
            # Remove unnecessary trailing spaces from title,
            # but remember if we did this because it may affect
            # the linktrail and because we eventually want to
            # re-add it outside of the link later.
            if not trailingChars:
                titleLength = len(titleWithSection)
                titleWithSection = titleWithSection.rstrip()
                hadTrailingSpaces = len(titleWithSection) != titleLength

            if not titleWithSection:
                # just skip empty links.
                return match.group()

            # Remove unnecessary initial and final spaces from label.
            # Please note that some editors prefer spaces around pipes.
            # (See [[en:Wikipedia:Semi-bots]]). We remove them anyway.
            if label is not None:
                # Remove unnecessary leading spaces from label,
                # but remember if we did this because we want
                # to re-add it outside of the link later.
                labelLength = len(label)
                label = label.lstrip()
                hadLeadingSpaces = len(label) != labelLength
                # Remove unnecessary trailing spaces from label,
                # but remember if we did this because it affects
                # the linktrail.
                if not trailingChars:
                    labelLength = len(label)
                    label = label.rstrip()
                    hadTrailingSpaces = len(label) != labelLength
            else:
                label = titleWithSection
            if trailingChars:
                label += trailingChars

            if self.site.siteinfo['case'] == 'first-letter':
                firstcase_title = first_lower(titleWithSection)
                firstcase_label = first_lower(label)
            else:
                firstcase_title = titleWithSection
                firstcase_label = label

            if firstcase_label == firstcase_title:
                newLink = f'[[{label}]]'
            # Check if we can create a link with trailing characters
            # instead of a pipelink
            elif (firstcase_label.startswith(firstcase_title)
                  and trailR.sub('', label[len(titleWithSection):]) == ''):
                newLink = '[[{}]]{}'.format(label[:len(titleWithSection)],
                                            label[len(titleWithSection):])

            else:
                # Try to capitalize the first letter of the title.
                # Not useful for languages that don't capitalize nouns.
                # TODO: Add a configuration variable for each site,
                # which determines if the link target is written in
                # uppercase
                if self.site.sitename == 'wikipedia:de':
                    titleWithSection = first_upper(titleWithSection)
                newLink = f'[[{titleWithSection}|{label}]]'
            # re-add spaces that were pulled out of the link.
            # Examples:
            #   text[[ title ]]text        -> text [[title]] text
            #   text[[ title | name ]]text -> text [[title|name]] text
            #   text[[ title |name]]text   -> text[[title|name]]text
            #   text[[title| name]]text    -> text [[title|name]]text
            if hadLeadingSpaces and not newline:
                newLink = ' ' + newLink
            if hadTrailingSpaces:
                newLink += ' '
            if newline:
                newLink = newline + newLink
            return newLink

        trailR = re.compile(self.site.linktrail())
        # The regular expression which finds links. Results consist of four groups:
        # group <newline> depends whether the links starts with a new line.
        # group <titleWithSection> is the page title and section, that is,
        # everything before | or ]. It'll include the # to make life easier for us.
        # group <label> is the alternative link title between | and ].
        # group <linktrail> is the link trail after ]] which are part of the word.
        # note that the definition of 'letter' varies from language to language.
        linkR = re.compile(
            r'(?P<newline>[\n]*)\[\[(?P<titleWithSection>[^\]\|]+)'
            r'(\|(?P<label>[^]|]*))?]](?P<linktrail>'
            + self.site.linktrail() + ')')

        text = textlib.replaceExcept(text, linkR, handleOneLink,
                                     ['comment', 'math', 'nowiki', 'pre',
                                      'startspace'])
        return text

    def resolveHtmlEntities(self, text: str) -> str:
        """Replace HTML entities with string."""
        ignore = [
            38,  # Ampersand (&amp;)
            39,  # Single quotation mark (&#39;) per T26093
            60,  # Less than (&lt;)
            62,  # Greater than (&gt;)
            91,  # Opening square bracket ([)
            # - sometimes used intentionally inside links
            93,  # Closing square bracket (])
            # - used intentionally inside links
            124,  # Vertical bar (|)
            # - used intentionally in navigation bar templates on w:de
            160,  # Non-breaking space (&nbsp;)
            # - not supported by Firefox textareas
            173,  # Soft hyphen (&shy;) - enable editing
            8206,  # Left-to-right mark (&lrm;)
            8207,  # Right-to-left mark (&rlm;)
        ]
        if self.template:
            ignore.append(32)  # Space ( )
            ignore.append(58)  # Colon (:)
        # TODO: T254350 - what other extension tags should be avoided?
        # (graph, math, score, timeline, etc.)
        text = pywikibot.html2unicode(
            text, ignore=ignore, exceptions=['comment', 'syntaxhighlight'])
        return text

    def removeUselessSpaces(self, text: str) -> str:
        """Cleanup multiple or trailing spaces."""
        exceptions = ['comment', 'math', 'nowiki', 'pre', 'syntaxhighlight',
                      'startspace', 'table', 'template']
        text = textlib.replaceExcept(text, r'(?m)[\t ]+( |$)', r'\1',
                                     exceptions, site=self.site)
        text = re.sub(r'\n\n\n*', '\n\n', text)
        return text

    def removeNonBreakingSpaceBeforePercent(self, text: str) -> str:
        """
        Remove a non-breaking space between number and percent sign.

        Newer MediaWiki versions automatically place a non-breaking space in
        front of a percent sign, so it is no longer required to place it
        manually.
        """
        text = textlib.replaceExcept(
            text, r'(\d)&(?:nbsp|#160|#x[Aa]0);%', r'\1 %', ['timeline'])
        return text

    def cleanUpSectionHeaders(self, text: str) -> str:
        """
        Add a space between the equal signs and the section title.

        Example::

            ==Section title==

        becomes::

            == Section title ==

        .. note:: This space is recommended in the syntax help on the
           English and German Wikipedias. It is not wanted on Lojban and
           English Wiktionaries (:phab:`T168399`, :phab:`T169064`) and
           it might be that it is not wanted on other wikis. If there
           are any complaints, please file a bug report.
        """
        return textlib.replaceExcept(
            text,
            r'(?m)^(={1,6})[ \t]*(?P<title>.*[^\s=])[ \t]*\1[ \t]*\r?\n',
            r'\1 \g<title> \1\n',
            ['comment', 'math', 'nowiki', 'pre'])

    def putSpacesInLists(self, text: str) -> str:
        """
        Add a space between the * or # and the text.

        .. note:: This space is recommended in the syntax help on the
           English, German and French Wikipedias. It might be that it
           is not wanted on other wikis. If there are any complaints,
           please file a bug report.
        """
        if not self.template:
            exceptions = ['comment', 'math', 'nowiki', 'pre',
                          'syntaxhighlight', 'template', 'timeline',
                          self.site.redirect_regex]
            text = textlib.replaceExcept(
                text,
                r'(?m)'
                r'^(?P<bullet>[:;]*(\*+|#+)[:;\*#]*)(?P<char>[^\s\*#:;].+?)',
                r'\g<bullet> \g<char>',
                exceptions)
        return text

    # from fixes.py
    def fixSyntaxSave(self, text: str) -> str:
        """Convert weblinks to wikilink, fix link syntax."""

        def replace_link(match: Match[str]) -> str:
            """Create a string to replace a single link."""
            replacement = '[['
            if re.match(r'(?:{}):'
                        .format('|'.join((*self.site.namespaces[6],
                                          *self.site.namespaces[14]))),
                        match['link']):
                replacement += ':'

            replacement += match['link']
            if match['title']:
                replacement += '|' + match['title']

            return replacement + ']]'

        exceptions = ['comment', 'math', 'nowiki', 'pre', 'startspace',
                      'syntaxhighlight']

        # link to the wiki working on
        # Only use suffixes for article paths
        for suffix in self.site._interwiki_urls(True):
            http_url = self.site.base_url(suffix, 'http')
            if self.site.protocol() == 'http':
                https_url = None
            else:
                https_url = self.site.base_url(suffix, 'https')

            # compare strings without the protocol, if they are empty support
            # also no prefix (//en.wikipedia.org/…)
            http = urlparse(http_url)
            https = urlparse(https_url)
            if https_url is not None and http.netloc == https.netloc:
                urls = ['(?:https?:)?'
                        + re.escape(urlunparse(('', *http[1:])))]
            else:
                urls = [re.escape(url) for url in (http_url, https_url)
                        if url is not None]

            for url in urls:
                # unescape {} placeholder
                url = url.replace(r'\{\}', '{title}')

                # Only include links which don't include the separator
                # as the wikilink won't support additional parameters
                separator = '?&' if '?' in suffix else '?'

                # Match first a non space in the title to prevent that multiple
                # spaces at the end without title will be matched by it
                title_regex = (r'(?P<link>[^{sep}]+?)'
                               r'(\s+(?P<title>[^\s].*?))'
                               .format(sep=separator))
                url_regex = fr'\[\[?{url}?\s*\]\]?'
                text = textlib.replaceExcept(
                    text,
                    url_regex.format(title=title_regex),
                    replace_link, exceptions, site=self.site)

        # external link in/starting with double brackets
        text = textlib.replaceExcept(
            text,
            r'\[\[(?P<url>https?://[^\]]+?)\]\]?',
            r'[\g<url>]', exceptions, site=self.site)

        # external link and description separated by a pipe, with
        # whitespace in front of the pipe, so that it is clear that
        # the dash is not a legitimate part of the URL.
        text = textlib.replaceExcept(
            text,
            r'\[(?P<url>https?://[^\|\] \r\n]+?) +\| *(?P<label>[^\|\]]+?)\]',
            r'[\g<url> \g<label>]', exceptions)

        # dash in external link, where the correct end of the URL can
        # be detected from the file extension. It is very unlikely that
        # this will cause mistakes.
        extensions = [fr'\.{ext}'
                      for ext in ['pdf', 'html?', 'php', 'aspx?', 'jsp']]
        text = textlib.replaceExcept(
            text,
            r'\[(?P<url>https?://[^\|\] ]+?(' + '|'.join(extensions) + r'))'
            r' *\| *(?P<label>[^\|\]]+?)\]',
            r'[\g<url> \g<label>]', exceptions)
        return text

    def fixHtml(self, text: str) -> str:
        """Relace html markups with wikitext markups."""

        def replace_header(match: Match[str]) -> str:
            """Create a header string for replacing."""
            depth = int(match[1])
            return r'{0} {1} {0}'.format('=' * depth, match[2])

        # Everything case-insensitive (?i)
        # Keep in mind that MediaWiki automatically converts <br> to <br />
        exceptions = ['comment', 'math', 'nowiki', 'pre', 'startspace',
                      'syntaxhighlight']
        text = textlib.replaceExcept(text, r'(?i)<(b|strong)>(.*?)</\1>',
                                     r"'''\2'''", exceptions, site=self.site)
        text = textlib.replaceExcept(text, r'(?i)<(i|em)>(.*?)</\1>',
                                     r"''\2''", exceptions, site=self.site)
        # horizontal line without attributes in a single line
        text = textlib.replaceExcept(text, r'(?i)([\r\n])<hr[ /]*>([\r\n])',
                                     r'\1----\2', exceptions)
        # horizontal line with attributes; can't be done with wiki syntax
        # so we only make it XHTML compliant
        text = textlib.replaceExcept(text, r'(?i)<hr ([^>/]+?)>',
                                     r'<hr \1 />',
                                     exceptions)
        # a header where only spaces are in the same line
        text = textlib.replaceExcept(
            text,
            r'(?i)(?<=[\r\n]) *<h([1-7])> *([^<]+?) *</h\1> *(?=[\r\n])',
            replace_header,
            exceptions)
        # TODO: maybe we can make the bot replace <p> tags with \r\n's.
        return text

    def fixReferences(self, text: str) -> str:
        """Fix references tags."""
        # See also
        # https://en.wikipedia.org/wiki/User:AnomieBOT/source/tasks/OrphanReferenceFixer.pm
        if self.namespace != 0:
            return text
        exceptions = ['comment', 'math', 'nowiki', 'pre', 'syntaxhighlight',
                      'startspace']

        # it should be name = " or name=" NOT name   ="
        text = re.sub(r'(?i)<ref +name(= *| *=)"', r'<ref name="', text)
        # remove empty <ref/>-tag
        text = textlib.replaceExcept(text,
                                     r'(?i)(<ref\s*/>|<ref *>\s*</ref>)',
                                     r'', exceptions)
        text = textlib.replaceExcept(text,
                                     r'(?i)<ref\s+([^>]+?)\s*>\s*</ref>',
                                     r'<ref \1/>', exceptions)
        return text

    def fixStyle(self, text: str) -> str:
        """Convert prettytable to wikitable class."""
        if self.namespace != 0:
            return text

        exceptions = ['comment', 'math', 'nowiki', 'pre', 'startspace',
                      'syntaxhighlight']

        text = textlib.replaceExcept(text,
                                     r'(class="[^"]*)prettytable([^"]*")',
                                     r'\1wikitable\2', exceptions)
        return text

    def fixTypo(self, text: str) -> str:
        """Fix units."""
        if self.namespace != 0:
            return text
        exceptions: List[Union[str, Pattern[str]]] = [
            'comment',
            'gallery',
            'hyperlink',
            'interwiki',
            'link',
            'nowiki',
            'math',
            'pre',
            'startspace',
            'syntaxhighlight',
        ]

        # change <number> ccm -> <number> cm³
        text = textlib.replaceExcept(text, r'(\d)\s*(?:&nbsp;)?ccm',
                                     r'\1&nbsp;cm³', exceptions,
                                     site=self.site)
        # Solve wrong Nº sign with °C or °F
        # additional exception requested on fr-wiki for this stuff
        pattern = re.compile('«.*?»')
        exceptions.append(pattern)
        text = textlib.replaceExcept(text, r'(\d)\s*(?:&nbsp;)?[º°]([CF])',
                                     r'\1&nbsp;°\2', exceptions,
                                     site=self.site)
        text = textlib.replaceExcept(text, 'º([CF])', '°' + r'\1',
                                     exceptions,
                                     site=self.site)
        return text

    def fix_ISBN(self, text: str) -> str:
        """Hyphenate ISBN numbers."""
        return _reformat_ISBNs(text, strict=self.ignore != CANCEL.MATCH)

    def addMultIssues(self, text: str) -> str:
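        """Standardize the top of the lead section.

        Title-fix, notice, cleanup and infobox templates found in the
        lead are moved to its top (in that order); the cleanup templates
        are wrapped in {{Çend problem}} when that template was already
        present or more than two of them were found.
        """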
        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        hisyarde_templates = mytools.get_cat_members(self.site, "Şablonên hişyarde", 10)
        sernav_templates = mytools.get_cat_members(self.site, "Şablonên ji bo sererastkirina sernavê rûpelê", 10)

        cleanup_templates = mytools.get_cat_members(self.site, "Şablonên hişyarde ji bo gotaran", 10)
        # Guard against the wrapper template being absent from the category
        if "Çend problem" in cleanup_templates:
            cleanup_templates.remove("Çend problem")

        wikicode = mwparserfromhell.parse(text)
        sections = wikicode.get_sections(include_lead=True)

        lead_section = sections[0]  # The lead section is always the first one

        existing_problems = None
        removed_cleanup_templates = []
        removed_hisyarde_templates = []
        removed_agahidank_templates = []
        removed_sernav_templates = []

        for template in lead_section.filter_templates():

            if mytools.ucfirst(template.name) == "Çend problem":
                if template.has(1):
                    existing_problems = str(template.get(1).value).strip()
                    lead_section.remove(template)

        for template in lead_section.filter_templates():
            template_name = mytools.ucfirst(template.name)

            if template_name in cleanup_templates:
                removed_cleanup_templates.append(template)
                lead_section.remove(template)

            if template_name in hisyarde_templates:
                removed_hisyarde_templates.append(template)
                lead_section.remove(template)

            if template_name in sernav_templates:
                removed_sernav_templates.append(template)
                lead_section.remove(template)

            if template_name.startswith("Agahîdank"):
                removed_agahidank_templates.append(template)
                lead_section.remove(template)

        if removed_agahidank_templates:
            readding_agahidank = "\n".join([str(template) for template in removed_agahidank_templates])
            lead_section.insert(0, readding_agahidank)

        readding_cleanup = ""

        if existing_problems:
            readding_cleanup += "\n" + existing_problems + "\n"

        if removed_cleanup_templates:
            readding_cleanup += "\n".join([str(template) for template in removed_cleanup_templates]) + "\n"

        if readding_cleanup.strip():
            if existing_problems:
                new_template = mwparserfromhell.nodes.Template("Çend problem")
                new_template.add(1, readding_cleanup)
                lead_section.insert(0, new_template)
            elif not existing_problems and len(removed_cleanup_templates) > 2:
                readding_cleanup = "\n" + readding_cleanup
                new_template = mwparserfromhell.nodes.Template("Çend problem")
                new_template.add(1, readding_cleanup)
                lead_section.insert(0, new_template)
            else:
                lead_section.insert(0, readding_cleanup)

        if removed_hisyarde_templates:
            readding_hisyarde = "\n".join([str(template) for template in removed_hisyarde_templates]) + "\n"
            lead_section.insert(0, readding_hisyarde)

        if removed_sernav_templates:
            readding_sernav = "\n".join([str(template) for template in removed_sernav_templates])
            lead_section.insert(0, readding_sernav)

        # lead_section was modified in place, so wikicode already reflects
        # the changes; serialize it back to text
        final_text = str(wikicode)

        if ''.join(text.split()) == ''.join(final_text.split()):
            return text
        else:
            final_text = self.fixCudakirinPlace(final_text)
            return final_text

    def fixCudakirinPlace(self, text: str) -> str:
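        """Move disambiguation hatnote templates to the very top of the page."""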
        if self.namespace != 0:
            return text

        cudakirin_templates = mytools.get_cat_members(self.site, "Şablon (cudakirin)", 10)

        wikicode = mwparserfromhell.parse(text)

        removed_templates = []

        for template in wikicode.filter_templates():
            if mytools.ucfirst(template.name) in cudakirin_templates:
                removed_templates.append(template)
                wikicode.remove(template)

        if removed_templates:
            # Join the removed templates and re-insert them at the very top
            concatenated_val = "\n".join([str(template) for template in removed_templates]) + "\n"

            wikicode.insert(0, concatenated_val)

        new_text = str(wikicode)
        if ''.join(text.split()) == ''.join(new_text.split()):
            new_text = text
        return new_text

    def addOrphanTag(self, text: str) -> str:
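        """Tag orphan articles with {{Sêwî|tarîx=<current month/year>}}."""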
        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        if self.contains_sewi_cat:
            return text

        if not self.is_sewi:
            return text

        tarix = mytools.get_cur_month_year()

        text = "{{Sêwî|tarîx=" + tarix + "}}\n" + text
        return text

    def removeOrphanTag(self, text: str) -> str:
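        """Remove {{Sêwî}} from articles that are no longer orphans."""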
        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        if not self.contains_sewi_cat:
            return text

        if self.is_sewi:
            return text

        text = mytools.remove_template(text, "Sêwî")

        return text

    def addStubTag(self, text: str) -> str:
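        """Tag short articles with {{Şitil}}."""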
        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        if self.contains_sitil_cat:
            return text

        if not self.is_sitil:
            return text
        sitil_text = "{{Şitil}}"
        text = textlib.add_text(text, sitil_text)
        return text

    def removeStubTag(self, text: str) -> str:
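        """Remove stub templates from articles that are no longer stubs."""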
        if self.namespace != 0:
            return text

        if self.is_disambig or self.gotara_zaravayan:
            return text

        if not self.contains_sitil_cat:
            return text

        if self.is_sitil:
            return text

        text = mytools.remove_template(text, "Şitil")

        # Remove other stub-like templates ({{...-şitil}}, {{Kurt}}, {{Stub}}, {{Şitlek}}, ...)
        template_regex = r'{{\s*([^\}]+\-şitil|[Şş]iti?l|[Kk]urt|[Ss]tub|[Şş]itlek|[^\}]+\-şitil\-[^\}]+)\s*}}'
        text = re.sub(template_regex, '', text)

        # Remove {{Şitil-...}} variants
        template_sitil_regex = r'{{\s*([Şş]itil-[^\}]+)\s*}}'
        text = re.sub(template_sitil_regex, '', text)

        return text
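

# A minimal standalone sketch (an illustrative assumption: this module is
# normally driven by kuCosmeticsRun.py, which additionally refuses to save
# whitespace-only changes). It shows the intended calling convention of
# CosmeticChangesToolkit.change().
if __name__ == '__main__':
    demo_site = pywikibot.Site('ku', 'wikipedia')
    demo_page = pywikibot.Page(demo_site, pywikibot.input('Page title'))
    toolkit = CosmeticChangesToolkit(demo_page, show_diff=True,
                                     ignore=CANCEL.PAGE)
    fixed_text, fix_summaries = toolkit.change(demo_page.text)
    pywikibot.output('; '.join(fix_summaries.values()) or 'No changes')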