novelupdatescc.py
# -*- coding: utf-8 -*-
import logging
from urllib.parse import quote

from lncrawl.core.crawler import Crawler
from lncrawl.models.chapter import Chapter

logger = logging.getLogger(__name__)

search_url = "https://www.novelupdates.cc/search/%s/1"


class NovelUpdatesCC(Crawler):
    base_url = [
        "https://www.novelupdates.cc/",
    ]
    def search_novel(self, query):
        query = quote(query.lower())
        soup = self.get_soup(search_url % query)

        results = []
        for li in soup.select(".result-list .list-item"):
            a = li.select_one("a.book-name")
            # Drop <font> tags from the title link before reading its text.
            for bad in a.select("font"):
                bad.extract()

            catalog = li.select_one(".book-catalog").text.strip()
            votes = li.select_one(".star-suite .score").text.strip()
            results.append(
                {
                    "title": a.text.strip(),
                    "url": self.absolute_url(a["href"]),
                    "info": "%s | Rating: %s" % (catalog, votes),
                }
            )

        return results
    def read_novel_info(self):
        logger.debug("Visiting %s", self.novel_url)
        soup = self.get_soup(self.novel_url)

        possible_title = soup.select_one(".book-name")
        assert possible_title, "No novel title"
        self.novel_title = possible_title.text.strip()
        logger.info("Novel title: %s", self.novel_title)

        possible_author = soup.select_one(".person-info .author .name")
        if possible_author:
            self.novel_author = possible_author.text.strip()
        logger.info("Novel author: %s", self.novel_author)

        possible_image = soup.select_one(".book-img img[src]")
        if possible_image:
            self.novel_cover = self.absolute_url(possible_image["src"])
        logger.info("Novel cover: %s", self.novel_cover)

        # Collect chapter links from the chapter list in page order.
        for a in soup.select("ul.chapter-list a[href]"):
            self.chapters.append(
                Chapter(
                    id=len(self.chapters) + 1,
                    title=a.text.strip(),
                    url=self.absolute_url(a["href"]),
                )
            )
    def download_chapter_body(self, chapter):
        soup = self.get_soup(chapter["url"])
        # Chapter text lives in the #chapter-entity container; the shared
        # cleaner extracts the readable content from it.
        content = soup.select_one("#chapter-entity")
        return self.cleaner.extract_contents(content)
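

# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by lncrawl): a minimal demonstration of
# how the CSS selectors in search_novel() behave, run against a made-up HTML
# fragment. The fragment below is an assumption for demonstration purposes;
# the live novelupdates.cc markup may differ.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from bs4 import BeautifulSoup

    sample = """
    <div class="result-list">
      <div class="list-item">
        <a class="book-name" href="/book/demo-novel">
          Demo Novel<font color="red">Demo</font>
        </a>
        <span class="book-catalog">Fantasy</span>
        <div class="star-suite"><span class="score">4.5</span></div>
      </div>
    </div>
    """
    soup = BeautifulSoup(sample, "html.parser")
    for li in soup.select(".result-list .list-item"):
        a = li.select_one("a.book-name")
        # Remove the <font> tags so only the plain title text remains.
        for bad in a.select("font"):
            bad.extract()
        print(a.text.strip())                                     # Demo Novel
        print(li.select_one(".book-catalog").text.strip())        # Fantasy
        print(li.select_one(".star-suite .score").text.strip())   # 4.5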