# Bikarhêner:Balyozxane/test.py
# Ji Wîkîpediya, ensîklopediya azad. (From Wikipedia, the free encyclopedia.)
import requests
import tweepy
import pywikibot
import os
import mwparserfromhell
from keys import consumer_key, consumer_secret, access_token, access_token_secret, bearer_token

client = tweepy.Client(bearer_token, consumer_key, consumer_secret, access_token, access_token_secret)


def get_twitter_conn_v1(api_key, api_secret, access_token, access_token_secret) -> tweepy.API:
    """Build and return an authenticated Twitter API v1.1 connection.

    Uses OAuth 1.0a user context; the resulting client is used elsewhere
    in this script for media uploads.
    """
    handler = tweepy.OAuth1UserHandler(api_key, api_secret)
    handler.set_access_token(access_token, access_token_secret)
    return tweepy.API(handler)


client_v1 = get_twitter_conn_v1(consumer_key, consumer_secret, access_token, access_token_secret)


def tweet(text, media_id):
    """Post *text* with one already-uploaded media item attached (API v2)."""
    attachments = [media_id]
    client.create_tweet(text=text, media_ids=attachments)


def get_image(media_url, save_dir):
    """Download a file from Wikimedia Commons into *save_dir*.

    *media_url* is the file title without the "File:" prefix (despite the
    parameter name, it is a title, not a URL). Returns the full local path
    of the saved file, or None when the download fails.
    """
    # Resolve the file page on Commons.
    commons = pywikibot.Site('commons', 'commons')
    file_page = pywikibot.FilePage(commons, 'File:' + media_url)

    # Derive the local file name from the canonical upload URL.
    remote_url = file_page.get_file_url()
    target_path = os.path.join(save_dir, os.path.basename(remote_url))

    try:
        if file_page.download(filename=target_path):
            print("Image downloaded successfully and saved as", target_path)
            return target_path
        print("Failed to download the image")
    except IOError as e:
        print(f"Failed to download the image: {e}")
    # Either the download reported failure or raised — signal with None.
    return None


def parse_week(page_title):
    """Extract the weekly featured-article data from a ku.wikipedia page.

    Scans *page_title* for the first {{Ceribandin}} template and returns a
    dict with keys "wene" (image file title), "gotar" (article title) and
    "nivis" (teaser text with wiki markup stripped). Returns None when the
    page does not exist or the template is not found.
    """
    # Set up Pywikibot for Kurdish Wikipedia.
    site = pywikibot.Site("ku", "wikipedia")
    page = pywikibot.Page(site, page_title)

    # Fetch the page content.
    try:
        text = page.text
    # Bug fix: pywikibot.NoPage was deprecated in Pywikibot 6 and removed
    # in Pywikibot 8; the current exception class is
    # pywikibot.exceptions.NoPageError. The old name would raise
    # AttributeError here instead of catching the missing-page case.
    except pywikibot.exceptions.NoPageError:
        print("Page does not exist.")
        return None

    # Parse the wikitext so we can walk its templates.
    parsed_text = mwparserfromhell.parse(text)

    # Find the template named "Ceribandin" (matches= is a regex against
    # the template name; only the first hit is used).
    for template in parsed_text.filter_templates(matches="Ceribandin"):

        # Pull the expected parameters; template.get() raises ValueError
        # if one is missing — the template is expected to carry all three.
        wene = template.get("wêne").value.strip()
        gotar = template.get("gotar").value.strip()
        nivis = str(template.get("nivîs").value).strip()  # wikicode as a string

        # Strip links/formatting down to plain text for the tweet body.
        nivis_text = mwparserfromhell.parse(nivis).strip_code()

        return {"wene": wene, "gotar": gotar, "nivis": nivis_text}

    # If the "Ceribandin" template is not found
    print("The 'ceribandin' template is not found on the page.")
    return None


# Directory where downloaded images are stored: ~/Pywikibot.
save_directory = os.path.join(os.path.expanduser("~"), "Pywikibot")
# Robustness fix: the directory was never created, so a fresh machine
# would fail inside get_image() when saving the file.
os.makedirs(save_directory, exist_ok=True)

# Fetch this week's featured-article data and tweet it with its image.
result = parse_week("Şablon:GH/2024/16")
if result:
    print("Wêne:", result["wene"])
    print("Gotar:", result["gotar"])
    print("Nivîs:", result["nivis"])

    media_path = get_image(result["wene"], save_directory)

    # Bug fix: get_image() returns None on failure, and
    # media_upload(filename=None) would raise — only upload and tweet
    # when the image actually downloaded.
    if media_path:
        media = client_v1.media_upload(filename=media_path)
        media_id = media.media_id

        # Link to the article, with spaces normalized to underscores.
        url = f"https://ku.wikipedia.org/wiki/{result['gotar'].replace(' ', '_')}"
        tweet_text = (f"Gotara hefteyê ya Wîkîpediyayê\n\n"
                      f"{result['gotar']}\n"
                      f"{url}")
        tweet(tweet_text, media_id)