[Python Original] Scraping the 平板电子书 novel site with Python

fanSLiang posted on 2023-2-1 18:40
[Python]
"""
 [url=home.php?mod=space&uid=238618]@Time[/url]    : 2023/1/5 15:53
 [url=home.php?mod=space&uid=686208]@AuThor[/url]  : FanSL
 [url=home.php?mod=space&uid=267492]@file[/url]    : 2023-1-5平板电子书下载小说.py
"""
import os
import shutil
import re
from lxml import etree
from tqdm import trange
import aiohttp
import asyncio
import time
from bs4 import BeautifulSoup
import requests


# Print the name_url search-result list
def print_name_url(name_url, count):
    for num, key in zip(range(1, count + 1), name_url):
        print(str(num) + ":{0: <20s}{1: <20s}{2: <40s}".format(key[0], key[2], key[3]))


# Take a book title, return name_url (the search results)
def get_name_url(url, book_name):
    search_url = url + "modules/article/search.php?searchkey=" + book_name
    with requests.get(search_url) as response:
        response.encoding = "utf-8"
        source_code = response.text
    obj = re.compile(
        r'《<a href="(?P<url>.*?)">(?P<name>.*?)</a>》.*?((([0-9]{3}[1-9]'
        r'|[0-9]{2}[1-9][0-9]{1}|[0-9]{1}[1-9][0-9]{2}|[1-9][0-9]{3})-'
        r'(((0[13578]|1[02])-(0[1-9]|[12][0-9]|3[01]))|((0[469]|11)-(0[1-9]'
        r'|[12][0-9]|30))|(02-(0[1-9]|[1][0-9]|2[0-8]))))|((([0-9]{2})(0[48]'
        r'|[2468][048]|[13579][26])|((0[48]|[2468][048]|[3579][26])00))-02-29))'
        r'\s(?P<type>.*?)小说</div>.*?最新章节:.*?>(?P<lastchapter>.*?)</a>',
        re.S)
    res = obj.finditer(source_code)
    name_url = []
    count = 0
    for i in res:
        name_url.append([])
        name_url[count].append(i.group("name"))
        name_url[count].append(i.group("url"))
        name_url[count].append(i.group("type"))
        name_url[count].append(i.group("lastchapter"))
        count = count + 1
    return name_url, count  # name_url is the list of matched novels; count is its length
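# Hypothetical shape of name_url, one entry per search hit
# (fields: title, book URL, genre, latest chapter; the real site may return different values):
#   [['某小说', 'http://www.qiuyelou.com/xiaoshuo/xxxx/', '玄幻', '第一千章 ...'], ...]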


# Take the chosen index, return main_url (the chapter-list page)
def get_main_url(url, book_id, name_url):
    novel_intro_url = name_url[int(book_id) - 1][1]
    with requests.get(novel_intro_url) as response:
        response.encoding = "utf-8"
        intro_code = response.text
        intro_code = etree.HTML(intro_code)
    href = intro_code.xpath("/html/body/div/div[4]/div[1]/p[1]/a[1]/@href")
    main_url = url + href[0]
    return main_url


# Derive the direct .txt download URL from the download-page URL
def get_download_url(download_page_url, book_name):
    # str.rstrip('.html') strips a *set of characters*, not a suffix, so trim the
    # extension with a regex instead.
    download_url = (re.sub(r'\.html$', '', download_page_url)
                    .replace('xiazai', '').replace('www', 'txt') + '/' + book_name + '.txt')
    return download_url
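# Hypothetical example of the rewrite above (the site's real URL layout may differ):
#   "http://www.qiuyelou.com/xiazai/12345.html" -> "http://txt.qiuyelou.com//12345/<book_name>.txt"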


# Take main_url, return a detail dict (title, author, intro, download URL)
def get_detail(url, main_url):
    detail = {}
    with requests.get(main_url) as response:
        response.encoding = "utf-8"
        main_code = response.text
        soup = BeautifulSoup(main_code, "html.parser")
        download_page_url = soup.find("div", class_="info").find("a", class_="txt")["href"]
        author = soup.find("span", class_="author").text[2:]
        intro = soup.find("div", class_="intro").contents[1].text.strip("\n")
        book_name = soup.find("h1").text
        download_url = get_download_url(download_page_url, book_name)
    detail["book_name"] = book_name
    detail["author"] = author
    detail["intro"] = intro
    detail["main_url"] = main_url
    detail["download_url"] = download_url
    return detail


# Download the novel from the site's own .txt link
def download_novel_by_ori_url(download_path, book_name, download_url):
    with requests.get(download_url) as download:
        if download.status_code != 200:
            return False
        # Only create the output file once we know the request succeeded
        with open(download_path + book_name + ".txt", "wb") as f:
            f.write(download.content)
    return True


# Create the directory if it does not exist; empty it if it does
def RemoveDir(filepath):
    if not os.path.exists(filepath):
        os.mkdir(filepath)
    else:
        shutil.rmtree(filepath)
        os.mkdir(filepath)


# Get the chapter list as a dict of {numbered title: chapter URL}
def get_chapters(main_url):
    chapters = {}
    with requests.get(main_url) as request:
        request.encoding = 'utf-8'
        soup = BeautifulSoup(request.text, 'lxml')
        chapters_temp = soup.find('div', class_='list').find('dl').find_all('a')
    for i, chapter in zip(range(1, len(chapters_temp) + 1), chapters_temp):
        chapters[str(i) + '、、' + chapter.text] = main_url + chapter.get('href')
    return chapters
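# Hypothetical shape of the returned dict: the '、、' separator lets download_chapter
# strip the ordering prefix from the title, while the full key is still used as the
# per-chapter log file name by write_logs_ and novel_pack:
#   {'1、、Chapter One': main_url + '1.html', '2、、Chapter Two': main_url + '2.html', ...}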


# Write the detail/chapter information file and create the logs directory
def write_detail_chapters(temp_path, novel_name, detail, chapters):
    if not os.path.exists(temp_path):
        os.mkdir(temp_path)
    RemoveDir(temp_path + novel_name)
    RemoveDir(temp_path + novel_name + '/logs')
    with open(temp_path + novel_name + '/' + 'information.txt', 'w', encoding='utf-8') as f:
        for key, value in detail.items():
            f.write(key + ':' + value + '\n')
        f.write('\n')
        for key, value in chapters.items():
            f.write(key + ':' + value + '\n')
    return temp_path + novel_name + '/logs/'


# Write a single chapter to a txt file under logs_path
def write_logs_(logs_path, chapter_name, chapter):
    with open(logs_path + chapter_name + '.txt', 'w', encoding='utf-8') as f:
        for key, value in chapter.items():
            f.write(key + '\n')
            f.write(value.replace("\n    ","    ").replace("    ","\n    ") + '\n')


# Pack all per-chapter files into one novel txt
def novel_pack(download_path, logs_path, detail, chapters):
    with open(download_path + detail['book_name'] + '.txt', mode='w', encoding='utf-8') as book_file:
        for key, tq in zip(chapters.keys(), trange(len(chapters), desc='正在打包')):
            with open(logs_path + key + '.txt', mode='r', encoding='utf-8') as chapter_file:
                book_file.write(chapter_file.read())
                book_file.write('\n')


# Strip site-injected junk (the "txt下载地址 ... 谢谢您的支持!!" blurbs) from the packed file
def remove_impurities(download_path, book_name):
    novel_path = download_path + book_name + '.txt'
    with open(novel_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    with open(novel_path, 'w', encoding='utf-8') as f:
        for line in lines:
            result = re.search('txt下载地址.*?谢谢您的支持!!', line)
            if result is not None:
                impurities = result.group()
                line = line.replace(impurities, ' ')
            f.write(line)


# Asynchronously download one chapter and write it to its log file
async def download_chapter(logs_path, chapter_name, chapter_href, session):
    chapter = {}
    async with session.get(chapter_href) as request:
        request.encoding = 'utf-8'
        request_text = await request.text()
        soup = BeautifulSoup(request_text, 'html.parser')
        chapter_content = soup.find('div', class_='content').text
    chapter[chapter_name.split('、、')[-1]] = chapter_content
    write_logs_(logs_path, chapter_name, chapter)


# Create one task per chapter and download them all concurrently
async def aio_download(logs_path, chapters):
    tasks = []
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(limit=64, ssl=False)) as session:
        for chapter_name, chapter_href in chapters.items():
            tasks.append(asyncio.create_task(download_chapter(logs_path, chapter_name, chapter_href, session)))
        await asyncio.wait(tasks)


# Fallback: download the novel chapter by chapter with asyncio + aiohttp, then pack it
def aio_main_download(download_path, temp_path, detail):
    chapters = get_chapters(detail['main_url'])
    logs_path = write_detail_chapters(temp_path, detail['book_name'], detail, chapters)
    asyncio.run(aio_download(logs_path, chapters))
    novel_pack(download_path, logs_path, detail, chapters)


# main function
def main(url, download_path, temp_path):
    book_name = input("请输入小说书名:")
    name_url, count = get_name_url(url, book_name)
    print_name_url(name_url, count)
    book_id = input("请输入下载的小说序号:")
    main_url = get_main_url(url, book_id, name_url)
    detail = get_detail(url, main_url.replace("com//", "com/"))
    print(detail)
    print("开始下载--------------------------------")
    start_time = time.time()
    os.makedirs(download_path, exist_ok=True)  # make sure the output directory exists
    is_over = download_novel_by_ori_url(download_path, detail['book_name'], detail['download_url'])
    # is_over = False
    if not is_over:
        print("原网站提供的txt下载失败,开始异步协程下载。")
        aio_main_download(download_path, temp_path, detail)
    end_time = time.time()
    print("下载完成--------------------------------")
    print(f"耗时:{round(end_time - start_time, 2)} s")
    delete_logs = input("是否保留(1)日志文件:")
    if delete_logs != '1':
        logs_path = temp_path + detail['book_name'] + '/'
        RemoveDir(logs_path)
        os.removedirs(logs_path)
    print("开始去杂--------------------------------")
    remove_impurities(download_path, detail['book_name'])
    print("去杂完成--------------------------------")


# main
if __name__ == '__main__':
    url = "http://www.qiuyelou.com/"
    download_path = "小说下载/"
    temp_path = "小说下载/temp/"
    # --------------------------------------------
    main(url, download_path, temp_path)
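
The script above is interactive (it prompts for a title and then an index). For a quick non-interactive test, a minimal driver might look like the sketch below; it assumes the code has been saved as novel_downloader.py, and the keyword "完美世界" and the choice of the first search hit are placeholders.

[Python]
# A minimal non-interactive sketch, assuming the script above is saved as novel_downloader.py;
# the search keyword and the hard-coded choice of the first hit are placeholders.
import os
import novel_downloader as nd

url = "http://www.qiuyelou.com/"
download_path = "小说下载/"
temp_path = "小说下载/temp/"
os.makedirs(download_path, exist_ok=True)

name_url, count = nd.get_name_url(url, "完美世界")  # search by keyword
nd.print_name_url(name_url, count)
main_url = nd.get_main_url(url, 1, name_url)  # take the first search result
detail = nd.get_detail(url, main_url.replace("com//", "com/"))
# Goes straight to the async chapter-by-chapter download, skipping the direct .txt attempt
nd.aio_main_download(download_path, temp_path, detail)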


运行结果 (run-result screenshots omitted)

Ratings (2 participants, +8 CB, +2 enthusiasm)

苏紫方璇  +7 CB  +1 enthusiasm  Reason: Welcome to analyze and discuss; the 52pojie forum is better with you here!
李玉风我爱你  +1 CB  +1 enthusiasm  Reason: BeautifulSoup, regex, and XPath all put to use

一只大菜猫 posted on 2023-2-1 21:08
Nice, entry-level stuff.
TPL posted on 2023-2-1 21:20
s757129 posted on 2023-2-1 22:08
zixudaoxian posted on 2023-2-2 09:37
Nice, I'll pick up some Python in a while.
bestwars posted on 2023-2-2 19:54
Learning a bit of Python.
bdywbhyw posted on 2023-2-3 09:40
Saving this for now; I'll study it when I need web scraping later.
安歌 posted on 2023-3-4 10:09
Taking this to learn from.
呆比久腻我 posted on 2023-3-14 17:57
Learning and exchanging. Impressive work.
kanikani07 posted on 2023-3-14 18:56
This code is really practical. Thanks, OP!