This commit is contained in:
parent
4559635102
commit
a7b5c0b786
15 changed files with 164 additions and 53 deletions
|
@ -15,16 +15,20 @@ to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|||
|
||||
- Metadata is now added to each chapter. Schema
|
||||
standard: [https://anansi-project.github.io/docs/comicinfo/schemas/v2.0](https://anansi-project.github.io/docs/comicinfo/schemas/v2.0)
|
||||
- `xmltodict` as a package requirement
|
||||
- Added `xmltodict` as a package requirement
|
||||
- Cache now also saves the manga title
|
||||
- New tests
|
||||
|
||||
### Fixed
|
||||
|
||||
- API template typos
|
||||
- Some useless type annotations
|
||||
|
||||
### Changed
|
||||
|
||||
- Simplified the chapter info generation
|
||||
- Updated the license year
|
||||
- Updated the API template
|
||||
|
||||
## [2.2.20] - 2023-02-12
|
||||
|
||||
|
|
2
LICENSE
2
LICENSE
|
@ -1,6 +1,6 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2022 Ivan Schaller
|
||||
Copyright (c) 2021-2023 Ivan Schaller
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
@ -22,9 +22,10 @@ class YourAPI:
|
|||
api_base_url = "https://api.mangadex.org"
|
||||
img_base_url = "https://uploads.mangadex.org"
|
||||
|
||||
# get infos to initiate class
|
||||
def __init__(self, url_uuid, language, forcevol):
|
||||
# static info
|
||||
"""
|
||||
get infos to initiate class
|
||||
"""
|
||||
self.api_name = "Your API Name"
|
||||
|
||||
self.url_uuid = url_uuid
|
||||
|
@ -34,32 +35,49 @@ class YourAPI:
|
|||
# attributes needed by app.py
|
||||
self.manga_uuid = "abc"
|
||||
self.manga_title = "abc"
|
||||
self.chapter_list = ["1", "2"]
|
||||
|
||||
# methods needed by app.py
|
||||
# get chapter infos as a dictionary
|
||||
def get_manga_chapter_data(chapter: str) -> dict:
|
||||
# these keys have to be returned
|
||||
return {
|
||||
self.chapter_list = ["1", "2", "2.1", "5", "10"]
|
||||
self.manga_chapter_data = { # example data
|
||||
"1": {
|
||||
"uuid": "abc",
|
||||
"volume": "1",
|
||||
"chapter": "1",
|
||||
"name": "test",
|
||||
},
|
||||
"2": {
|
||||
"uuid": "abc",
|
||||
"volume": "1",
|
||||
"chapter": "2",
|
||||
"name": "test",
|
||||
},
|
||||
}
|
||||
# or with --forcevol
|
||||
return {
|
||||
self.manga_chapter_data = {
|
||||
"1:1": {
|
||||
"uuid": "abc",
|
||||
"volume": "1",
|
||||
"chapter": "1",
|
||||
"name": "test",
|
||||
},
|
||||
"1:2": {
|
||||
"uuid": "abc",
|
||||
"volume": "1",
|
||||
"chapter": "2",
|
||||
"name": "test",
|
||||
},
|
||||
}
|
||||
|
||||
# get chapter images as a list (full links)
|
||||
def get_chapter_images(chapter: str, download_wait: float) -> list:
|
||||
"""
|
||||
Get chapter images as a list (full links)
|
||||
|
||||
Args:
|
||||
chapter: The chapter number (chapter data index)
|
||||
download_wait: Wait time between image downloads
|
||||
|
||||
Returns:
|
||||
The list of urls of the page images
|
||||
"""
|
||||
|
||||
# example
|
||||
return [
|
||||
"https://abc.def/image/123.png",
|
||||
|
@ -67,8 +85,18 @@ class YourAPI:
|
|||
"https://abc.def/image/12345.png",
|
||||
]
|
||||
|
||||
# get metadata with correct keys for ComicInfo.xml
|
||||
def create_metadata(self, chapter: str) -> dict:
|
||||
"""
|
||||
Get metadata with correct keys for ComicInfo.xml
|
||||
Provide as much metadata as possible. empty/false values will be ignored
|
||||
|
||||
Args:
|
||||
chapter: The chapter number (chapter data index)
|
||||
|
||||
Returns:
|
||||
The metadata as a dict
|
||||
"""
|
||||
|
||||
# example
|
||||
return {
|
||||
"Volume": "abc",
|
||||
|
|
|
@ -138,7 +138,6 @@ class Mangadex:
|
|||
"Error retrieving the chapters list. Did you specify a valid language code?"
|
||||
)
|
||||
raise exc
|
||||
else:
|
||||
if total_chapters == 0:
|
||||
log.error("No chapters available to download in specified language")
|
||||
raise KeyError
|
||||
|
@ -272,9 +271,10 @@ class Mangadex:
|
|||
"Volume": chapter_data["volume"],
|
||||
"Number": chapter_data["chapter"],
|
||||
"PageCount": chapter_data["pages"],
|
||||
"Title": chapter_data["name"],
|
||||
"Series": self.manga_title,
|
||||
"Count": len(self.manga_chapter_data),
|
||||
"LanguageISO": self.language,
|
||||
"Title": self.manga_title,
|
||||
"Summary": self.manga_data["attributes"]["description"].get("en"),
|
||||
"Genre": self.manga_data["attributes"].get("publicationDemographic"),
|
||||
"Web": f"https://mangadex.org/title/{self.manga_uuid}",
|
||||
|
|
|
@ -5,6 +5,10 @@ from loguru import logger as log
|
|||
|
||||
|
||||
def write_metadata(chapter_path: Path, metadata: dict) -> None:
|
||||
if metadata["Format"] == "pdf":
|
||||
log.warning("Can't add metadata for pdf format. Skipping")
|
||||
return
|
||||
|
||||
try:
|
||||
metadata_template = Path("mangadlp/metadata/ComicInfo.xml").read_text(
|
||||
encoding="utf8"
|
||||
|
@ -27,5 +31,6 @@ def write_metadata(chapter_path: Path, metadata: dict) -> None:
|
|||
log.debug(f"Updating metadata: '{key}' = '{value}'")
|
||||
metadata_empty["ComicInfo"][key] = value
|
||||
|
||||
metadata_export = xmltodict.unparse(metadata_empty, pretty=True, indent=(" " * 4))
|
||||
metadata_export = xmltodict.unparse(metadata_empty, pretty=True, indent=" " * 4)
|
||||
metadata_file.touch(exist_ok=True)
|
||||
metadata_file.write_text(metadata_export, encoding="utf8")
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<ComicInfo>
|
||||
<Title></Title>
|
||||
<Series></Series>
|
||||
<Number></Number>
|
||||
<Count></Count>
|
||||
<Volume></Volume>
|
||||
|
|
|
@ -9,7 +9,7 @@ from loguru import logger as log
|
|||
|
||||
# create an archive of the chapter images
|
||||
def make_archive(chapter_path: Path, file_format: str) -> None:
|
||||
zip_path: Path = Path(f"{chapter_path}.zip")
|
||||
zip_path = Path(f"{chapter_path}.zip")
|
||||
try:
|
||||
# create zip
|
||||
with ZipFile(zip_path, "w") as zipfile:
|
||||
|
@ -29,7 +29,7 @@ def make_pdf(chapter_path: Path) -> None:
|
|||
log.error("Cant import img2pdf. Please install it first")
|
||||
raise exc
|
||||
|
||||
pdf_path: Path = Path(f"{chapter_path}.pdf")
|
||||
pdf_path = Path(f"{chapter_path}.pdf")
|
||||
images: list[str] = []
|
||||
for file in chapter_path.iterdir():
|
||||
images.append(str(file))
|
||||
|
|
20
tests/ComicInfo_test.xml
Normal file
20
tests/ComicInfo_test.xml
Normal file
|
@ -0,0 +1,20 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<ComicInfo>
|
||||
<Title>title1</Title>
|
||||
<Series>series1</Series>
|
||||
<Number>2</Number>
|
||||
<Count>10</Count>
|
||||
<Volume>1</Volume>
|
||||
<Summary>summary1</Summary>
|
||||
<Genre>genre1</Genre>
|
||||
<Web>https://mangadex.org</Web>
|
||||
<PageCount>99</PageCount>
|
||||
<LanguageISO>en</LanguageISO>
|
||||
<Format></Format>
|
||||
<ScanInformation></ScanInformation>
|
||||
<SeriesGroup></SeriesGroup>
|
||||
<BlackAndWhite>Unknown</BlackAndWhite>
|
||||
<Manga>Yes</Manga>
|
||||
<AgeRating>Unknown</AgeRating>
|
||||
<Notes>Downloaded with https://github.com/olofvndrhr/manga-dlp</Notes>
|
||||
</ComicInfo>
|
|
@ -3,8 +3,7 @@ from pathlib import Path
|
|||
|
||||
import pytest
|
||||
|
||||
import mangadlp.app as app
|
||||
import mangadlp.utils as utils
|
||||
from mangadlp import app, utils
|
||||
|
||||
|
||||
def test_make_archive_true():
|
||||
|
|
|
@ -4,7 +4,7 @@ from pathlib import Path
|
|||
import pytest
|
||||
import requests
|
||||
|
||||
import mangadlp.downloader as downloader
|
||||
from mangadlp import downloader
|
||||
|
||||
|
||||
def test_downloader():
|
||||
|
|
|
@ -1,7 +1,4 @@
|
|||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
import mangadlp.cli as mdlpinput
|
||||
|
||||
|
|
|
@ -6,27 +6,28 @@ from mangadlp.cache import CacheDB
|
|||
|
||||
def test_cache_creation():
|
||||
cache_file = Path("cache.json")
|
||||
cache = CacheDB(cache_file, "abc", "en")
|
||||
cache = CacheDB(cache_file, "abc", "en", "test")
|
||||
|
||||
assert cache_file.exists() and cache_file.read_text(encoding="utf8") == "{}"
|
||||
assert cache_file.exists()
|
||||
cache_file.unlink()
|
||||
|
||||
|
||||
def test_cache_insert():
|
||||
cache_file = Path("cache.json")
|
||||
cache = CacheDB(cache_file, "abc", "en")
|
||||
cache = CacheDB(cache_file, "abc", "en", "test")
|
||||
cache.add_chapter("1")
|
||||
cache.add_chapter("2")
|
||||
|
||||
cache_data = json.loads(cache_file.read_text(encoding="utf8"))
|
||||
|
||||
assert cache_data["abc__en"]["chapters"] == ["1", "2"]
|
||||
assert cache_data["abc__en"]["name"] == "test"
|
||||
cache_file.unlink()
|
||||
|
||||
|
||||
def test_cache_update():
|
||||
cache_file = Path("cache.json")
|
||||
cache = CacheDB(cache_file, "abc", "en")
|
||||
cache = CacheDB(cache_file, "abc", "en", "test")
|
||||
cache.add_chapter("1")
|
||||
cache.add_chapter("2")
|
||||
|
||||
|
@ -43,29 +44,31 @@ def test_cache_update():
|
|||
|
||||
def test_cache_multiple():
|
||||
cache_file = Path("cache.json")
|
||||
cache1 = CacheDB(cache_file, "abc", "en")
|
||||
cache1 = CacheDB(cache_file, "abc", "en", "test")
|
||||
cache1.add_chapter("1")
|
||||
cache1.add_chapter("2")
|
||||
|
||||
cache2 = CacheDB(cache_file, "def", "en")
|
||||
cache2 = CacheDB(cache_file, "def", "en", "test2")
|
||||
cache2.add_chapter("8")
|
||||
cache2.add_chapter("9")
|
||||
|
||||
cache_data = json.loads(cache_file.read_text(encoding="utf8"))
|
||||
|
||||
assert cache_data["abc__en"]["chapters"] == ["1", "2"]
|
||||
assert cache_data["abc__en"]["name"] == "test"
|
||||
assert cache_data["def__en"]["chapters"] == ["8", "9"]
|
||||
assert cache_data["def__en"]["name"] == "test2"
|
||||
|
||||
cache_file.unlink()
|
||||
|
||||
|
||||
def test_cache_lang():
|
||||
cache_file = Path("cache.json")
|
||||
cache1 = CacheDB(cache_file, "abc", "en")
|
||||
cache1 = CacheDB(cache_file, "abc", "en", "test")
|
||||
cache1.add_chapter("1")
|
||||
cache1.add_chapter("2")
|
||||
|
||||
cache2 = CacheDB(cache_file, "abc", "de")
|
||||
cache2 = CacheDB(cache_file, "abc", "de", "test")
|
||||
cache2.add_chapter("8")
|
||||
cache2.add_chapter("9")
|
||||
|
||||
|
|
31
tests/test_07_metadata.py
Normal file
31
tests/test_07_metadata.py
Normal file
|
@ -0,0 +1,31 @@
|
|||
from pathlib import Path
|
||||
|
||||
from mangadlp.metadata import write_metadata
|
||||
|
||||
|
||||
def test_metadata_creation():
    """Write a ComicInfo.xml from sample metadata and compare it to the checked-in fixture."""
    # expected output fixture and the paths the writer will use/create
    expected_file = Path("tests/ComicInfo_test.xml")
    target_dir = Path("tests/")
    generated_file = Path("tests/ComicInfo.xml")

    # sample chapter metadata covering every populated ComicInfo field
    sample_metadata = {
        "Volume": "1",
        "Number": "2",
        "PageCount": "99",
        "Count": "10",
        "LanguageISO": "en",
        "Title": "title1",
        "Series": "series1",
        "Summary": "summary1",
        "Genre": "genre1",
        "Web": "https://mangadex.org",
    }

    write_metadata(target_dir, sample_metadata)
    assert generated_file.exists()

    # the generated XML must match the fixture byte-for-byte
    produced = generated_file.read_text(encoding="utf8")
    expected = expected_file.read_text(encoding="utf8")
    assert expected == produced

    # cleanup
    generated_file.unlink()
|
|
@ -64,7 +64,7 @@ def test_chapter_infos():
|
|||
language = "en"
|
||||
forcevol = False
|
||||
test = Mangadex(url_uuid, language, forcevol)
|
||||
chapter_infos = test.get_chapter_infos("1")
|
||||
chapter_infos = test.manga_chapter_data["1"]
|
||||
chapter_uuid = chapter_infos["uuid"]
|
||||
chapter_name = chapter_infos["name"]
|
||||
chapter_num = chapter_infos["chapter"]
|
||||
|
@ -239,3 +239,24 @@ def test_get_chapter_images_error(monkeypatch):
|
|||
monkeypatch.setattr(requests, "get", fail_url)
|
||||
|
||||
assert not test.get_chapter_images(chapter_num, 2)
|
||||
|
||||
|
||||
def test_chapter_metadata():
    """Check that create_metadata() returns the expected fields for a live Mangadex title."""
    url_uuid = "https://mangadex.org/title/a96676e5-8ae2-425e-b549-7f15dd34a6d8/komi-san-wa-komyushou-desu"
    language = "en"
    forcevol = False
    test = Mangadex(url_uuid, language, forcevol)

    # build metadata for chapter "1" and pull out the fields under test
    chapter_metadata = test.create_metadata("1")
    observed = (
        chapter_metadata["Series"],
        chapter_metadata["Title"],
        chapter_metadata["Volume"],
        chapter_metadata["Number"],
        chapter_metadata["Web"],
    )

    assert observed == (
        "Komi-san wa Komyushou Desu",
        "A Normal Person",
        "1",
        "1",
        "https://mangadex.org/title/a96676e5-8ae2-425e-b549-7f15dd34a6d8",
    )
|
||||
|
|
|
@ -6,7 +6,7 @@ from pathlib import Path
|
|||
|
||||
import pytest
|
||||
|
||||
import mangadlp.app as app
|
||||
from mangadlp import app
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
@ -107,12 +107,14 @@ def test_full_with_input_folder(wait_20s):
|
|||
download_path = "tests"
|
||||
manga_path = Path("tests/Shikimori's Not Just a Cutie")
|
||||
chapter_path = Path("tests/Shikimori's Not Just a Cutie/Ch. 1")
|
||||
metadata_path = Path("tests/Shikimori's Not Just a Cutie/ComicInfo.xml")
|
||||
command_args = f"-u {url_uuid} -l {language} -c {chapters} --path {download_path} --format '{file_format}' --debug --wait 2"
|
||||
script_path = "manga-dlp.py"
|
||||
os.system(f"python3 {script_path} {command_args}")
|
||||
|
||||
assert manga_path.exists() and manga_path.is_dir()
|
||||
assert chapter_path.exists() and chapter_path.is_dir()
|
||||
assert metadata_path.exists() and metadata_path.is_file()
|
||||
# cleanup
|
||||
shutil.rmtree(manga_path, ignore_errors=True)
|
||||
|
||||
|
|
Loading…
Reference in a new issue