
Commit

[dynastyscans] improve metadata and filename formats
mikf committed Oct 10, 2017
1 parent 2ef3c35 commit 31ea600
Showing 1 changed file with 19 additions and 10 deletions.
29 changes: 19 additions & 10 deletions gallery_dl/extractor/dynastyscans.py
@@ -9,7 +9,7 @@
 """Extract manga-chapters from https://dynasty-scans.com/"""

 from .common import Extractor, Message
-from .. import text
+from .. import text, util
 import re
 import json

@@ -18,19 +18,21 @@ class DynastyscansChapterExtractor(Extractor):
     """Extractor for manga-chapters from dynasty-scans.com"""
     category = "dynastyscans"
     subcategory = "chapter"
-    directory_fmt = ["{category}", "{manga}", "c{chapter:>03} - {title}"]
-    filename_fmt = "{manga}_c{chapter:>03}_{page:>03}.{extension}"
+    directory_fmt = [
+        "{category}", "{manga}", "c{chapter:>03}{chapter_minor}{title:?: //}"]
+    filename_fmt = (
+        "{manga}_c{chapter:>03}{chapter_minor}_{page:>03}.{extension}")
     pattern = [r"(?:https?://)?(?:www\.)?dynasty-scans\.com/chapters/([^/]+)"]
     test = [
         (("http://dynasty-scans.com/chapters/"
           "hitoribocchi_no_oo_seikatsu_ch33"), {
             "url": "ff79ea9956522a8dafd261c1fbe3c74aa8470dc5",
-            "keyword": "eb837477565ee7e647d08ae0ac60c1108234cb80",
+            "keyword": "fb2f470b995df5b301ccede31ed9829a010236db",
         }),
         (("http://dynasty-scans.com/chapters/"
           "new_game_the_spinoff_special_13"), {
             "url": "2cd5e04bd16f842dc884c145a44cf0c64ec27a21",
-            "keyword": "93b75d0c0aaeb849c99f2225a4b97f466bc3ace9",
+            "keyword": "281bbe0fb74b812ced595619ca5876983490dc0e",
         }),
     ]
     url_base = "https://dynasty-scans.com/"
@@ -47,27 +49,34 @@ def items(self):
         data["count"] = len(imgs)
         yield Message.Version, 1
         yield Message.Directory, data
-        for num, img in enumerate(imgs, 1):
+        for data["page"], img in enumerate(imgs, 1):
             url = self.url_base + img["image"]
             text.nameext_from_url(url, data)
-            data["page"] = num
             data["name"] = img["name"]
             yield Message.Url, url, data

     def get_job_metadata(self, page):
         """Collect metadata for extractor-job"""
         info  , pos = text.extract(page, "<h3 id='chapter-title'><b>", "</b>")
         author, pos = text.extract(page, " by ", "</a>", pos)
         group , pos = text.extract(page, '"icon-print"></i> ', '</span>', pos)
         date  , pos = text.extract(page, '"icon-calendar"></i> ', '<', pos)

         match = re.match(
-            r"(?:<a [^>]+>)?([^<]+)(?:</a>)?(?: ch(\d+))?(?:: (.+))?",
+            (r"(?:<a[^>]*>)?([^<]+)(?:</a>)?"  # manga name
+             r"(?: ch(\d+)([^:<]*))?"  # chapter info
+             r"(?:: (.+))?"),  # title
             info
         )

         return {
             "manga": text.unescape(match.group(1)),
-            "chapter": match.group(2) or "",
-            "title": text.unescape(match.group(3) or ""),
+            "chapter": util.safe_int(match.group(2)),
+            "chapter_minor": match.group(3) or "",
+            "title": text.unescape(match.group(4) or ""),
             "author": text.remove_html(author),
             "group": (text.remove_html(group) or
                       text.extract(group, ' alt="', '"')[0] or ""),
             "date": date,
             "lang": "en",
             "language": "English",
