[2.3.1] - 2023-03-12 #41

Merged
olofvndrhr merged 19 commits from dev into master 2023-03-12 04:47:37 +01:00
28 changed files with 531 additions and 450 deletions

View file

@ -26,7 +26,7 @@ pipeline:
branch: master
event: pull_request
commands:
- python3 -m hatch build --clean
- just test_build
# create release-notes
test-create-release-notes:

View file

@ -26,4 +26,4 @@ pipeline:
branch: master
event: pull_request
commands:
- python3 -m tox
- just test_tox

View file

@ -29,4 +29,4 @@ pipeline:
- grep -v img2pdf contrib/requirements_dev.txt > contrib/requirements_dev_arm64.txt
- rm -f contrib/requirements_dev.txt
- mv contrib/requirements_dev_arm64.txt contrib/requirements_dev.txt
- python3 -m tox
- just test_tox

View file

@ -17,51 +17,29 @@ pipeline:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- shfmt -d -i 4 -bn -ci -sr .
- just test_shfmt
# check code style - python
test-black:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m black --check --diff .
# check imports - python
test-isort:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m isort --check-only --diff .
# check unused and missing imports - python
test-autoflake:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m autoflake --remove-all-unused-imports -r -v mangadlp/
- python3 -m autoflake --check --remove-all-unused-imports -r -v mangadlp/
- just test_black
# check static typing - python
test-mypy:
test-pyright:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m mypy --install-types --non-interactive mangadlp/
- just install_deps
- just test_pyright
# mccabe, pycodestyle, pyflakes tests - python
test-pylama:
# ruff test - python
test-ruff:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m pylama mangadlp/
# pylint test - python
test-pylint:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m pip install -r requirements.txt
- python3 -m pylint --fail-under 9 mangadlp/
- just test_ruff
# test mkdocs generation
test-mkdocs:
@ -72,14 +50,14 @@ pipeline:
- cd docs || exit 1
- python3 -m mkdocs build --strict
# test code with different python versions - python
# test code with pytest - python
test-tox-pytest:
when:
event: [ push ]
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m tox -e basic
- just test_pytest
# generate coverage report - python
test-tox-coverage:
@ -89,7 +67,7 @@ pipeline:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m tox -e coverage
- just test_coverage
# analyse code with sonarqube and upload it
sonarqube-analysis:

View file

@ -9,6 +9,22 @@ to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- Add support for more sites
## [2.3.1] - 2023-03-12
### Added
- Added TypedDicts for type checkers and type annotation
### Fixed
- Fixed some typos in the README
### Changed
- Switched from pylint/pylama/isort/autoflake to ruff
- Switched from mypy to pyright and added strict type checking
- Updated the api template
## [2.3.0] - 2023-02-15
### Added

View file

@ -19,31 +19,43 @@ Code Analysis
Meta
[![Code style](https://img.shields.io/badge/code%20style-black-black)](https://github.com/psf/black)
[![Linter](https://img.shields.io/badge/linter-pylint-yellowgreen)](https://pylint.pycqa.org/en/latest/)
[![Types](https://img.shields.io/badge/types-mypy-blue)](https://github.com/python/mypy)
[![Imports](https://img.shields.io/badge/imports-isort-ef8336.svg)](https://github.com/pycqa/isort)
[![Linter](https://img.shields.io/badge/linter-ruff-red)](https://github.com/charliermarsh/ruff)
[![Types](https://img.shields.io/badge/types-pyright-blue)](https://github.com/microsoft/pyright)
[![Tests](https://img.shields.io/badge/tests-pytest%20%7C%20tox-yellow)](https://github.com/pytest-dev/pytest/)
[![Coverage](https://img.shields.io/badge/coverage-coveragepy-green)](https://github.com/nedbat/coveragepy)
[![License](https://img.shields.io/badge/license-MIT-9400d3.svg)](https://snyk.io/learn/what-is-mit-license/)
[![Compatibility](https://img.shields.io/pypi/pyversions/manga-dlp)](https://pypi.org/project/manga-dlp/)
---
## Description
A manga download script written in python. It only supports [mangadex.org](https://mangadex.org/) for now. But support
for other sites is planned.
for other sites is _planned™_.
Before downloading a new chapter, the script always checks if there is already a chapter with the same name in the
download directory. If found the chapter is skipped. So you can run the script on a schedule to only download new
chapters without any additional setup.
The default behaviour is to pack the images to a [cbz archive](https://en.wikipedia.org/wiki/Comic_book_archive). If
you just want the folder with all the pictures use the flag `--nocbz`.
you just want the folder with all the pictures use the flag `--format ""`.
## _Currently_ Supported sites
- [Mangadex.org](https://mangadex.org/)
## Features (not complete)
- Metadata support with [ComicInfo.xml](https://anansi-project.github.io/docs/comicinfo/intro)
- Json caching
- Custom hooks after/before each download
- Custom chapter name format
- Volume support
- Multiple archive formats supported (cbz,cbr,zip,none)
- Language selection
- Download all chapters directly
- And others...
## Usage
### Quick start
@ -124,18 +136,18 @@ verbosity: [mutually_exclusive]
For suggestions for improvement, just open a pull request.
If you want to add support for a new site, there is an api [template file](./contrib/api_template.py) which you can use.
And more infos and tools in the contrib [README.md](contrib/README.md)
If you want to add support for a new site, there is an api [template file](contrib/api_template.py) which you can use.
And more infos and tools are in the contrib [README.md](contrib/README.md)
Otherwise, you can open am issue with the name of the site which you want support for. (not guaranteed to be
implemented)
Otherwise, you can open an issue with the name of the site which you want support for (not guaranteed to be
implemented).
If you encounter any bugs, also just open an issue with a description of the problem.
## TODO's
- <del>Make docker container for easy distribution</del>
--> [Dockerhub](https://hub.docker.com/repository/docker/olofvndrhr/manga-dlp)
--> [Dockerhub](https://hub.docker.com/r/olofvndrhr/manga-dlp)
- <del>Automate release</del>
--> Done with woodpecker-ci
- <del>Make pypi package</del>

View file

@ -1,9 +1,14 @@
from typing import Dict, List, Union
from mangadlp.types import ChapterData, ComicInfo
# api template for manga-dlp
class YourAPI:
"""Your API Class.
Get infos for a manga from example.org
Get infos for a manga from example.org.
Args:
url_uuid (str): URL or UUID of the manga
@ -22,10 +27,8 @@ class YourAPI:
api_base_url = "https://api.mangadex.org"
img_base_url = "https://uploads.mangadex.org"
def __init__(self, url_uuid, language, forcevol):
"""
get infos to initiate class
"""
def __init__(self, url_uuid: str, language: str, forcevol: bool):
"""get infos to initiate class."""
self.api_name = "Your API Name"
self.url_uuid = url_uuid
@ -36,22 +39,24 @@ class YourAPI:
self.manga_uuid = "abc"
self.manga_title = "abc"
self.chapter_list = ["1", "2", "2.1", "5", "10"]
self.manga_chapter_data = { # example data
self.manga_chapter_data: Dict[str, ChapterData] = { # example data
"1": {
"uuid": "abc",
"volume": "1",
"chapter": "1",
"name": "test",
"pages" 2,
},
"2": {
"uuid": "abc",
"volume": "1",
"chapter": "2",
"name": "test",
"pages": 45,
},
}
# or with --forcevol
self.manga_chapter_data = {
self.manga_chapter_data: Dict[str, ChapterData] = {
"1:1": {
"uuid": "abc",
"volume": "1",
@ -66,9 +71,8 @@ class YourAPI:
},
}
def get_chapter_images(chapter: str, download_wait: float) -> list:
"""
Get chapter images as a list (full links)
def get_chapter_images(self, chapter: str, wait_time: float) -> List[str]:
"""Get chapter images as a list (full links).
Args:
chapter: The chapter number (chapter data index)
@ -77,7 +81,6 @@ class YourAPI:
Returns:
The list of urls of the page images
"""
# example
return [
"https://abc.def/image/123.png",
@ -85,10 +88,10 @@ class YourAPI:
"https://abc.def/image/12345.png",
]
def create_metadata(self, chapter: str) -> dict:
"""
Get metadata with correct keys for ComicInfo.xml
Provide as much metadata as possible. empty/false values will be ignored
def create_metadata(self, chapter: str) -> ComicInfo:
"""Get metadata with correct keys for ComicInfo.xml.
Provide as much metadata as possible. empty/false values will be ignored.
Args:
chapter: The chapter number (chapter data index)
@ -96,7 +99,6 @@ class YourAPI:
Returns:
The metadata as a dict
"""
# metadata types. have to be valid
# {key: (type, default value, valid values)}
{
@ -155,7 +157,7 @@ class YourAPI:
# example
return {
"Volume": "abc",
"Volume": 1,
"LanguageISO": "en",
"Title": "test",
}

View file

@ -14,9 +14,7 @@ hatchling>=1.11.0
pytest>=7.0.0
coverage>=6.3.1
black>=22.1.0
isort>=5.10.0
pylint>=2.13.0
mypy>=0.940
tox>=3.24.5
autoflake>=1.4
pylama>=8.3.8
ruff>=0.0.247
pyright>=1.1.294

View file

@ -17,31 +17,43 @@ Code Analysis
Meta
[![Code style](https://img.shields.io/badge/code%20style-black-black)](https://github.com/psf/black)
[![Linter](https://img.shields.io/badge/linter-pylint-yellowgreen)](https://pylint.pycqa.org/en/latest/)
[![Types](https://img.shields.io/badge/types-mypy-blue)](https://github.com/python/mypy)
[![Imports](https://img.shields.io/badge/imports-isort-ef8336.svg)](https://github.com/pycqa/isort)
[![Linter](https://img.shields.io/badge/linter-ruff-red)](https://github.com/charliermarsh/ruff)
[![Types](https://img.shields.io/badge/types-pyright-blue)](https://github.com/microsoft/pyright)
[![Tests](https://img.shields.io/badge/tests-pytest%20%7C%20tox-yellow)](https://github.com/pytest-dev/pytest/)
[![Coverage](https://img.shields.io/badge/coverage-coveragepy-green)](https://github.com/nedbat/coveragepy)
[![License](https://img.shields.io/badge/license-MIT-9400d3.svg)](https://snyk.io/learn/what-is-mit-license/)
[![Compatibility](https://img.shields.io/pypi/pyversions/manga-dlp)](https://pypi.org/project/manga-dlp/)
---
## Description
A manga download script written in python. It only supports [mangadex.org](https://mangadex.org/) for now. But support
for other sites is planned.
for other sites is _planned™_.
Before downloading a new chapter, the script always checks if there is already a chapter with the same name in the
download directory. If found the chapter is skipped. So you can run the script on a schedule to only download new
chapters without any additional setup.
The default behaviour is to pack the images to a [cbz archive](https://en.wikipedia.org/wiki/Comic_book_archive). If
you just want the folder with all the pictures use the flag `--nocbz`.
you just want the folder with all the pictures use the flag `--format ""`.
## _Currently_ Supported sites
- [Mangadex.org](https://mangadex.org/)
## Features (not complete)
- Metadata support with [ComicInfo.xml](https://anansi-project.github.io/docs/comicinfo/intro)
- Json caching
- Custom hooks after/before each download
- Custom chapter name format
- Volume support
- Multiple archive formats supported (cbz,cbr,zip,none)
- Language selection
- Download all chapters directly
- And others...
## Usage
### Quick start
@ -82,7 +94,7 @@ mangadlp <args> # call script directly
### With docker
See the docker [README](docker/)
See the docker [README](https://manga-dlp.ivn.sh/docker/)
## Options
@ -122,20 +134,18 @@ verbosity: [mutually_exclusive]
For suggestions for improvement, just open a pull request.
If you want to add support for a new site, there is an
api [template file](https://github.com/olofvndrhr/manga-dlp/blob/master/contrib/api_template.py) which you can use.
And more infos and tools in the
contrib [README.md](https://github.com/olofvndrhr/manga-dlp/blob/master/contrib/README.md)
If you want to add support for a new site, there is an api [template file](https://github.com/olofvndrhr/manga-dlp/tree/master/contrib/api_template.py) which you can use.
And more infos and tools are in the contrib [README.md](https://github.com/olofvndrhr/manga-dlp/tree/master/contrib/README.md)
Otherwise, you can open am issue with the name of the site which you want support for. (not guaranteed to be
implemented)
Otherwise, you can open an issue with the name of the site which you want support for (not guaranteed to be
implemented).
If you encounter any bugs, also just open an issue with a description of the problem.
## TODO's
- <del>Make docker container for easy distribution</del>
--> [Dockerhub](https://hub.docker.com/repository/docker/olofvndrhr/manga-dlp)
--> [Dockerhub](https://hub.docker.com/r/olofvndrhr/manga-dlp)
- <del>Automate release</del>
--> Done with woodpecker-ci
- <del>Make pypi package</del>

View file

@ -68,45 +68,39 @@ create_venv:
@python3 -m venv venv
install_deps:
@echo "installing dependencies"
@pip3 install -r requirements.txt
install_deps_dev:
@echo "installing dependencies"
@pip3 install -r contrib/requirements_dev.txt
test_shfmt:
@find . -type f \( -name "**.sh" -and -not -path "./venv/*" -and -not -path "./.tox/*" \) -exec shfmt -d -i 4 -bn -ci -sr "{}" \+;
@find . -type f \( -name "**.sh" -and -not -path "./.**" -and -not -path "./venv**" \) -exec shfmt -d -i 4 -bn -ci -sr "{}" \+;
test_black:
@python3 -m black --check --diff .
@python3 -m black --check --diff mangadlp/
test_isort:
@python3 -m isort --check-only --diff .
test_pyright:
@python3 -m pyright mangadlp/
test_mypy:
@python3 -m mypy --install-types --non-interactive mangadlp/
test_ruff:
@python3 -m ruff --diff mangadlp/
test_ci_conf:
@woodpecker-cli lint .woodpecker/
test_pytest:
@python3 -m tox -e basic
test_autoflake:
@python3 -m autoflake --remove-all-unused-imports -r -v mangadlp/
@python3 -m autoflake --check --remove-all-unused-imports -r -v mangadlp/
test_pylama:
@python3 -m pylama --options tox.ini mangadlp/
test_pylint:
@python3 -m pylint --fail-under 9 mangadlp/
test_coverage:
@python3 -m tox -e coverage
test_tox:
@python3 -m tox
test_tox_coverage:
@python3 -m tox -e coverage
test_build:
@python3 -m hatch build
test_ci_conf:
@woodpecker-cli lint .woodpecker/
@python3 -m hatch build --clean
test_docker_build:
@docker build . -f docker/Dockerfile.amd64 -t manga-dlp:test
@ -123,11 +117,8 @@ lint:
-just test_ci_conf
just test_shfmt
just test_black
just test_isort
just test_mypy
just test_autoflake
just test_pylama
just test_pylint
just test_pyright
just test_ruff
@echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n"
tests:
@ -135,11 +126,8 @@ tests:
-just test_ci_conf
just test_shfmt
just test_black
just test_isort
just test_mypy
just test_autoflake
just test_pylama
just test_pylint
just test_pyright
just test_ruff
just test_pytest
@echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n"
@ -148,13 +136,10 @@ tests_full:
-just test_ci_conf
just test_shfmt
just test_black
just test_isort
just test_mypy
just test_autoflake
just test_pylama
just test_pylint
just test_pyright
just test_ruff
just test_build
just test_tox
just test_tox_coverage
just test_coverage
just test_docker_build
@echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n"

View file

@ -1 +1 @@
__version__ = "2.3.0"
__version__ = "2.3.1"

View file

@ -1,15 +1,18 @@
import re
from time import sleep
from typing import Any, Dict, List
import requests
from loguru import logger as log
from mangadlp import utils
from mangadlp.types import ChapterData, ComicInfo
class Mangadex:
"""Mangadex API Class.
Get infos for a manga from mangadex.org
Get infos for a manga from mangadex.org.
Args:
url_uuid (str): URL or UUID of the manga
@ -64,10 +67,10 @@ class Mangadex:
log.error("No valid UUID found")
raise exc
return uuid
return uuid # pyright:ignore
# make initial request
def get_manga_data(self) -> dict:
def get_manga_data(self) -> Dict[str, Any]:
log.debug(f"Getting manga data for: {self.manga_uuid}")
counter = 1
while counter <= 3:
@ -84,12 +87,14 @@ class Mangadex:
counter += 1
else:
break
response_body: Dict[str, Dict[str, Any]] = response.json() # pyright:ignore
# check if manga exists
if response.json()["result"] != "ok":
if response_body["result"] != "ok": # type:ignore
log.error("Manga not found")
raise KeyError
return response.json()["data"]
return response_body["data"]
# get the title of the manga (and fix the filename)
def get_manga_title(self) -> str:
@ -111,7 +116,7 @@ class Mangadex:
if item.get(self.language):
alt_title = item
break
title = alt_title[self.language]
title = alt_title[self.language] # pyright:ignore
except (KeyError, UnboundLocalError):
log.warning(
"Manga title also not found in alt titles. Falling back to english title"
@ -132,7 +137,7 @@ class Mangadex:
timeout=10,
)
try:
total_chapters = r.json()["total"]
total_chapters: int = r.json()["total"]
except Exception as exc:
log.error(
"Error retrieving the chapters list. Did you specify a valid language code?"
@ -146,13 +151,13 @@ class Mangadex:
return total_chapters
# get chapter data like name, uuid etc
def get_chapter_data(self) -> dict:
def get_chapter_data(self) -> Dict[str, ChapterData]:
log.debug(f"Getting chapter data for: {self.manga_uuid}")
api_sorting = "order[chapter]=asc&order[volume]=asc"
# check for chapters in specified lang
total_chapters = self.check_chapter_lang()
chapter_data = {}
chapter_data: Dict[str, ChapterData] = {}
last_volume, last_chapter = ("", "")
offset = 0
while offset < total_chapters: # if more than 500 chapters
@ -160,8 +165,9 @@ class Mangadex:
f"{self.api_base_url}/manga/{self.manga_uuid}/feed?{api_sorting}&limit=500&offset={offset}&{self.api_additions}",
timeout=10,
)
for chapter in r.json()["data"]:
attributes: dict = chapter["attributes"]
response_body: Dict[str, Any] = r.json()
for chapter in response_body["data"]:
attributes: Dict[str, Any] = chapter["attributes"]
# chapter infos from feed
chapter_num: str = attributes.get("chapter") or ""
chapter_vol: str = attributes.get("volume") or ""
@ -203,7 +209,7 @@ class Mangadex:
return chapter_data
# get images for the chapter (mangadex@home)
def get_chapter_images(self, chapter: str, wait_time: float) -> list:
def get_chapter_images(self, chapter: str, wait_time: float) -> List[str]:
log.debug(f"Getting chapter images for: {self.manga_uuid}")
athome_url = f"{self.api_base_url}/at-home/server"
chapter_uuid = self.manga_chapter_data[chapter]["uuid"]
@ -237,11 +243,11 @@ class Mangadex:
if api_error:
return []
chapter_hash = api_data["chapter"]["hash"]
chapter_img_data = api_data["chapter"]["data"]
chapter_hash = api_data["chapter"]["hash"] # pyright:ignore
chapter_img_data = api_data["chapter"]["data"] # pyright:ignore
# get list of image urls
image_urls = []
image_urls: List[str] = []
for image in chapter_img_data:
image_urls.append(f"{self.img_base_url}/data/{chapter_hash}/{image}")
@ -250,9 +256,9 @@ class Mangadex:
return image_urls
# create list of chapters
def create_chapter_list(self) -> list:
def create_chapter_list(self) -> List[str]:
log.debug(f"Creating chapter list for: {self.manga_uuid}")
chapter_list = []
chapter_list: List[str] = []
for data in self.manga_chapter_data.values():
chapter_number: str = data["chapter"]
volume_number: str = data["volume"]
@ -263,15 +269,15 @@ class Mangadex:
return chapter_list
def create_metadata(self, chapter: str) -> dict:
def create_metadata(self, chapter: str) -> ComicInfo:
log.info("Creating metadata from api")
chapter_data = self.manga_chapter_data[chapter]
try:
volume = int(chapter_data.get("volume"))
volume = int(chapter_data["volume"])
except (ValueError, TypeError):
volume = None
metadata = {
metadata: ComicInfo = {
"Volume": volume,
"Number": chapter_data.get("chapter"),
"PageCount": chapter_data.get("pages"),

View file

@ -1,7 +1,7 @@
import re
import shutil
from pathlib import Path
from typing import Any, Union
from typing import Any, Dict, List, Tuple, Union
from loguru import logger as log
@ -10,11 +10,12 @@ from mangadlp.api.mangadex import Mangadex
from mangadlp.cache import CacheDB
from mangadlp.hooks import run_hook
from mangadlp.metadata import write_metadata
from mangadlp.types import ChapterData
from mangadlp.utils import get_file_format
def match_api(url_uuid: str) -> type:
"""Match the correct api class from a string
"""Match the correct api class from a string.
Args:
url_uuid: url/uuid to check
@ -22,9 +23,8 @@ def match_api(url_uuid: str) -> type:
Returns:
The class of the API to use
"""
# apis to check
apis: list[tuple[str, re.Pattern, type]] = [
apis: List[Tuple[str, re.Pattern[str], type]] = [
(
"mangadex.org",
re.compile(
@ -53,6 +53,7 @@ def match_api(url_uuid: str) -> type:
class MangaDLP:
"""Download Mangas from supported sites.
After initialization, start the script with the function get_manga().
Args:
@ -108,7 +109,7 @@ class MangaDLP:
self.chapter_post_hook_cmd = chapter_post_hook_cmd
self.cache_path = cache_path
self.add_metadata = add_metadata
self.hook_infos: dict = {}
self.hook_infos: Dict[str, Any] = {}
# prepare everything
self._prepare()
@ -226,7 +227,7 @@ class MangaDLP:
skipped_chapters: list[Any] = []
error_chapters: list[Any] = []
for chapter in chapters_to_download:
if self.cache_path and chapter in cached_chapters:
if self.cache_path and chapter in cached_chapters: # pyright:ignore
log.info(f"Chapter '{chapter}' is in cache. Skipping download")
continue
@ -240,7 +241,7 @@ class MangaDLP:
skipped_chapters.append(chapter)
# update cache
if self.cache_path:
cache.add_chapter(chapter)
cache.add_chapter(chapter) # pyright:ignore
continue
except Exception:
# skip download/packing due to an error
@ -273,7 +274,7 @@ class MangaDLP:
# update cache
if self.cache_path:
cache.add_chapter(chapter)
cache.add_chapter(chapter) # pyright:ignore
# start chapter post hook
run_hook(
@ -310,7 +311,7 @@ class MangaDLP:
# once called per chapter
def get_chapter(self, chapter: str) -> Path:
# get chapter infos
chapter_infos: dict = self.api.manga_chapter_data[chapter]
chapter_infos: ChapterData = self.api.manga_chapter_data[chapter]
log.debug(f"Chapter infos: {chapter_infos}")
# get image urls for chapter
@ -352,7 +353,7 @@ class MangaDLP:
log.debug(f"Filename: '{chapter_filename}'")
# set download path for chapter (image folder)
chapter_path = self.manga_path / chapter_filename
chapter_path: Path = self.manga_path / chapter_filename
# set archive path with file format
chapter_archive_path = Path(f"{chapter_path}{self.file_format}")

View file

@ -26,12 +26,14 @@ class CacheDB:
if not self.db_data.get(self.db_key):
self.db_data[self.db_key] = {}
self.db_uuid_data: dict = self.db_data[self.db_key]
self.db_uuid_data = self.db_data[self.db_key]
if not self.db_uuid_data.get("name"):
self.db_uuid_data.update({"name": self.name})
self._write_db()
self.db_uuid_chapters: list = self.db_uuid_data.get("chapters") or []
self.db_uuid_chapters: List[str] = (
self.db_uuid_data.get("chapters") or [] # type:ignore
)
def _prepare_db(self) -> None:
if self.db_path.exists():
@ -44,11 +46,11 @@ class CacheDB:
log.error("Can't create db-file")
raise exc
def _read_db(self) -> Dict[str, dict]:
def _read_db(self) -> Dict[str, Dict[str, Union[str, List[str]]]]:
log.info(f"Reading cache-db: {self.db_path}")
try:
db_txt = self.db_path.read_text(encoding="utf8")
db_dict: dict[str, dict] = json.loads(db_txt)
db_dict: Dict[str, Dict[str, Union[str, List[str]]]] = json.loads(db_txt)
except Exception as exc:
log.error("Can't load cache-db")
raise exc
@ -73,7 +75,7 @@ class CacheDB:
raise exc
def sort_chapters(chapters: list) -> List[str]:
def sort_chapters(chapters: List[str]) -> List[str]:
try:
sorted_list = sorted(chapters, key=float)
except Exception:

View file

@ -1,5 +1,6 @@
import sys
from pathlib import Path
from typing import Any, List
import click
from click_option_group import (
@ -15,7 +16,7 @@ from mangadlp.logger import prepare_logger
# read in the list of links from a file
def readin_list(_ctx, _param, value) -> list:
def readin_list(_ctx: click.Context, _param: str, value: str) -> List[str]:
if not value:
return []
@ -38,8 +39,8 @@ def readin_list(_ctx, _param, value) -> list:
@click.help_option()
@click.version_option(version=__version__, package_name="manga-dlp")
# manga selection
@optgroup.group("source", cls=RequiredMutuallyExclusiveOptionGroup)
@optgroup.option(
@optgroup.group("source", cls=RequiredMutuallyExclusiveOptionGroup) # type: ignore
@optgroup.option( # type: ignore
"-u",
"--url",
"--uuid",
@ -49,19 +50,19 @@ def readin_list(_ctx, _param, value) -> list:
show_default=True,
help="URL or UUID of the manga",
)
@optgroup.option(
@optgroup.option( # type: ignore
"--read",
"read_mangas",
is_eager=True,
callback=readin_list,
type=click.Path(exists=True, dir_okay=False),
type=click.Path(exists=True, dir_okay=False, path_type=str),
default=None,
show_default=True,
help="Path of file with manga links to download. One per line",
)
# logging options
@optgroup.group("verbosity", cls=MutuallyExclusiveOptionGroup)
@optgroup.option(
@optgroup.group("verbosity", cls=MutuallyExclusiveOptionGroup) # type: ignore
@optgroup.option( # type: ignore
"--loglevel",
"verbosity",
type=int,
@ -69,7 +70,7 @@ def readin_list(_ctx, _param, value) -> list:
show_default=True,
help="Custom log level",
)
@optgroup.option(
@optgroup.option( # type: ignore
"--warn",
"verbosity",
flag_value=30,
@ -77,7 +78,7 @@ def readin_list(_ctx, _param, value) -> list:
show_default=True,
help="Only log warnings and higher",
)
@optgroup.option(
@optgroup.option( # type: ignore
"--debug",
"verbosity",
flag_value=10,
@ -227,12 +228,8 @@ def readin_list(_ctx, _param, value) -> list:
help="Enable/disable creation of metadata via ComicInfo.xml",
)
@click.pass_context
def main(ctx: click.Context, **kwargs) -> None:
"""
Script to download mangas from various sites
"""
def main(ctx: click.Context, **kwargs: Any) -> None:
"""Script to download mangas from various sites."""
url_uuid: str = kwargs.pop("url_uuid")
read_mangas: List[str] = kwargs.pop("read_mangas")
verbosity: int = kwargs.pop("verbosity")

View file

@ -2,7 +2,7 @@ import logging
import shutil
from pathlib import Path
from time import sleep
from typing import Union
from typing import List, Union
import requests
from loguru import logger as log
@ -12,7 +12,7 @@ from mangadlp import utils
# download images
def download_chapter(
image_urls: list,
image_urls: List[str],
chapter_path: Union[str, Path],
download_wait: float,
) -> None:
@ -48,8 +48,8 @@ def download_chapter(
# write image
try:
with image_path.open("wb") as file:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, file)
r.raw.decode_content = True # pyright:ignore
shutil.copyfileobj(r.raw, file) # pyright:ignore
except Exception as exc:
log.error("Can't write file")
raise exc

View file

@ -1,11 +1,15 @@
import os
import subprocess
from typing import Any
from loguru import logger as log
def run_hook(command: str, hook_type: str, **kwargs) -> int:
"""
def run_hook(command: str, hook_type: str, **kwargs: Any) -> int:
"""Run a command.
Run a command with subprocess.run and add kwargs to the environment.
Args:
command (str): command to run
hook_type (str): type of the hook
@ -14,7 +18,6 @@ def run_hook(command: str, hook_type: str, **kwargs) -> int:
Returns:
exit_code (int): exit code of command
"""
# check if hook commands are empty
if not command or command == "None":
log.debug(f"Hook '{hook_type}' empty. Not running")

View file

@ -1,5 +1,6 @@
import logging
import sys
from typing import Any, Dict
from loguru import logger
@ -8,11 +9,9 @@ LOGURU_FMT = "{time:%Y-%m-%dT%H:%M:%S%z} | <level>[{level: <7}]</level> [{name:
# from loguru docs
class InterceptHandler(logging.Handler):
"""
Intercept python logging messages and log them via loguru.logger
"""
"""Intercept python logging messages and log them via loguru.logger."""
def emit(self, record):
def emit(self, record: Any) -> None:
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
@ -21,8 +20,8 @@ class InterceptHandler(logging.Handler):
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
while frame.f_code.co_filename == logging.__file__: # pyright:ignore
frame = frame.f_back # type: ignore
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
@ -32,7 +31,7 @@ class InterceptHandler(logging.Handler):
# init logger with format and log level
def prepare_logger(loglevel: int = 20) -> None:
config: dict = {
config: Dict[str, Any] = {
"handlers": [
{
"sink": sys.stdout,

View file

@ -1,14 +1,18 @@
from pathlib import Path
from typing import Any, Dict, Tuple
from typing import Any, Dict, List, Tuple, Union
import xmltodict
from loguru import logger as log
from mangadlp.types import ComicInfo
METADATA_FILENAME = "ComicInfo.xml"
METADATA_TEMPLATE = Path("mangadlp/metadata/ComicInfo_v2.0.xml")
# define metadata types, defaults and valid values. an empty list means no value check
# {key: (type, default value, valid values)}
METADATA_TYPES: Dict[str, Tuple[type, Any, list]] = {
METADATA_TYPES: Dict[
str, Tuple[Any, Union[str, int, None], List[Union[str, int, None]]]
] = {
"Title": (str, None, []),
"Series": (str, None, []),
"Number": (str, None, []),
@ -59,10 +63,10 @@ METADATA_TYPES: Dict[str, Tuple[type, Any, list]] = {
}
def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
def validate_metadata(metadata_in: ComicInfo) -> Dict[str, ComicInfo]:
log.info("Validating metadata")
metadata_valid: Dict[str, ComicInfo] = {"ComicInfo": {}}
metadata_valid: dict[str, ComicInfo] = {"ComicInfo": {}}
for key, value in METADATA_TYPES.items():
metadata_type, metadata_default, metadata_validation = value
@ -75,7 +79,7 @@ def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
# check if metadata key is available
try:
md_to_check = metadata_in[key]
md_to_check: Union[str, int, None] = metadata_in[key]
except KeyError:
continue
# check if provided metadata item is empty
@ -84,7 +88,7 @@ def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
# check if metadata type is correct
log.debug(f"Key:{key} -> value={type(md_to_check)} -> check={metadata_type}")
if not isinstance(md_to_check, metadata_type): # noqa
if not isinstance(md_to_check, metadata_type):
log.warning(
f"Metadata has wrong type: {key}:{metadata_type} -> {md_to_check}"
)
@ -104,8 +108,8 @@ def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
return metadata_valid
def write_metadata(chapter_path: Path, metadata: dict) -> None:
if metadata["Format"] == "pdf":
def write_metadata(chapter_path: Path, metadata: ComicInfo) -> None:
if metadata["Format"] == "pdf": # pyright:ignore
log.warning("Can't add metadata for pdf format. Skipping")
return

50
mangadlp/types.py Normal file
View file

@ -0,0 +1,50 @@
from typing import Optional, TypedDict
class ComicInfo(TypedDict, total=False):
    """ComicInfo.xml basic types.

    Keys mirror the ComicInfo.xml metadata schema. ``total=False`` means every
    key is optional; values may also be ``None`` (empty/false values are
    dropped later during validation).

    Validation is done via metadata.validate_metadata()
    All valid types and values are specified in metadata.METADATA_TYPES
    """

    Title: Optional[str]
    Series: Optional[str]
    Number: Optional[str]
    Count: Optional[int]
    Volume: Optional[int]
    AlternateSeries: Optional[str]
    AlternateNumber: Optional[str]
    AlternateCount: Optional[int]
    Summary: Optional[str]
    Notes: Optional[str]
    Year: Optional[int]
    Month: Optional[int]
    Day: Optional[int]
    Writer: Optional[str]
    Colorist: Optional[str]
    Publisher: Optional[str]
    Genre: Optional[str]
    Web: Optional[str]
    PageCount: Optional[int]
    LanguageISO: Optional[str]
    Format: Optional[str]
    BlackAndWhite: Optional[str]
    Manga: Optional[str]
    ScanInformation: Optional[str]
    SeriesGroup: Optional[str]
    AgeRating: Optional[str]
    CommunityRating: Optional[int]
class ChapterData(TypedDict):
    """Basic chapter-data types.

    All values have to be provided (``total=True`` is the TypedDict default),
    so every chapter entry carries the full set of keys.
    """

    uuid: str  # chapter UUID used to fetch images from the API
    volume: str  # volume number as a string (may be empty for volume-less chapters)
    chapter: str  # chapter number as a string (supports e.g. "2.1")
    name: str  # chapter display name
    pages: int  # page count of the chapter

View file

@ -24,7 +24,7 @@ def make_archive(chapter_path: Path, file_format: str) -> None:
def make_pdf(chapter_path: Path) -> None:
try:
import img2pdf # pylint: disable=import-outside-toplevel
import img2pdf # pylint: disable=import-outside-toplevel # pyright:ignore
except Exception as exc:
log.error("Cant import img2pdf. Please install it first")
raise exc
@ -34,14 +34,14 @@ def make_pdf(chapter_path: Path) -> None:
for file in chapter_path.iterdir():
images.append(str(file))
try:
pdf_path.write_bytes(img2pdf.convert(images))
pdf_path.write_bytes(img2pdf.convert(images)) # pyright:ignore
except Exception as exc:
log.error("Can't create '.pdf' archive")
raise exc
# create a list of chapters
def get_chapter_list(chapters: str, available_chapters: list) -> List[str]:
def get_chapter_list(chapters: str, available_chapters: List[str]) -> List[str]:
# check if there are available chapter
chapter_list: list[str] = []
for chapter in chapters.split(","):

View file

@ -9,14 +9,8 @@ description = "A cli manga downloader"
readme = "README.md"
license = "MIT"
requires-python = ">=3.8"
authors = [
{ name = "Ivan Schaller", email = "ivan@schaller.sh" },
]
keywords = [
"manga",
"downloader",
"mangadex",
]
authors = [{ name = "Ivan Schaller", email = "ivan@schaller.sh" }]
keywords = ["manga", "downloader", "mangadex"]
classifiers = [
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
@ -30,7 +24,7 @@ dependencies = [
"loguru>=0.6.0",
"click>=8.1.3",
"click-option-group>=0.5.5",
"xmltodict>=0.13.0"
"xmltodict>=0.13.0",
]
[project.urls]
@ -69,40 +63,82 @@ dependencies = [
"pytest>=7.0.0",
"coverage>=6.3.1",
"black>=22.1.0",
"isort>=5.10.0",
"pylint>=2.13.0",
"mypy>=0.940",
"tox>=3.24.5",
"autoflake>=1.4",
"pylama>=8.3.8",
"ruff>=0.0.247",
]
[tool.isort]
py_version = 39
skip_gitignore = true
line_length = 88
profile = "black"
multi_line_output = 3
include_trailing_comma = true
use_parentheses = true
# pyright
[tool.mypy]
python_version = "3.9"
disallow_untyped_defs = false
follow_imports = "normal"
ignore_missing_imports = true
warn_no_return = false
warn_unused_ignores = true
show_error_context = true
show_column_numbers = true
show_error_codes = true
pretty = true
no_implicit_optional = false
[tool.pyright]
typeCheckingMode = "strict"
pythonVersion = "3.9"
reportUnnecessaryTypeIgnoreComment = true
reportShadowedImports = true
reportUnusedExpression = true
reportMatchNotExhaustive = true
# venvPath = "."
# venv = "venv"
# ruff
[tool.ruff]
target-version = "py39"
select = [
"E", # pycodetyle err
"W", # pycodetyle warn
"D", # pydocstyle
"C90", # mccabe
"I", # isort
"PLE", # pylint err
"PLW", # pylint warn
"PLC", # pylint convention
"PLR", # pylint refactor
"F", # pyflakes
"RUF", # ruff specific
]
line-length = 88
fix = true
show-fixes = true
format = "grouped"
ignore-init-module-imports = true
respect-gitignore = true
ignore = ["E501", "D103", "D100", "D102", "PLR2004"]
exclude = [
".direnv",
".git",
".mypy_cache",
".ruff_cache",
".svn",
".venv",
"venv",
"__pypackages__",
"build",
"dist",
"venv",
]
[tool.ruff.per-file-ignores]
"__init__.py" = ["D104"]
[tool.ruff.pylint]
max-args = 10
[tool.ruff.mccabe]
max-complexity = 10
[tool.ruff.pydocstyle]
convention = "google"
[tool.ruff.pycodestyle]
max-doc-length = 88
# pytest
[tool.pytest.ini_options]
pythonpath = [
"."
]
pythonpath = ["."]
# coverage
[tool.coverage.run]
source = ["mangadlp"]
@ -127,12 +163,3 @@ exclude_lines = [
"@(abc.)?abstractmethod",
]
ignore_errors = true
[tool.pylint.main]
py-version = "3.9"
[tool.pylint.logging]
logging-modules = ["logging", "loguru"]
disable = "C0301, C0114, C0116, W0703, R0902, R0913, E0401, W1203"
good-names = "r"
logging-format-style = "new"

View file

@ -16,8 +16,6 @@ def test_read_and_url():
def test_no_read_and_url():
url_uuid = "https://mangadex.org/title/0aea9f43-d4a9-4bf7-bebc-550a512f9b95/shikimori-s-not-just-a-cutie"
link_file = "tests/testfile.txt"
language = "en"
chapters = "1"
file_format = "cbz"
@ -30,7 +28,6 @@ def test_no_read_and_url():
def test_no_chaps():
url_uuid = "https://mangadex.org/title/0aea9f43-d4a9-4bf7-bebc-550a512f9b95/shikimori-s-not-just-a-cutie"
language = "en"
chapters = ""
file_format = "cbz"
download_path = "tests"
command_args = f"-u {url_uuid} -l {language} --path {download_path} --format {file_format} --debug"

View file

@ -40,7 +40,7 @@ def test_manga_pre_hook(wait_10s):
manga_pre_hook,
]
script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args
command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0
assert hook_file.is_file()
@ -72,7 +72,7 @@ def test_manga_post_hook(wait_10s):
manga_post_hook,
]
script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args
command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0
assert hook_file.is_file()
@ -104,7 +104,7 @@ def test_chapter_pre_hook(wait_10s):
chapter_pre_hook,
]
script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args
command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0
assert hook_file.is_file()
@ -136,7 +136,7 @@ def test_chapter_post_hook(wait_10s):
chapter_post_hook,
]
script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args
command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0
assert hook_file.is_file()
@ -176,7 +176,7 @@ def test_all_hooks(wait_10s):
chapter_post_hook,
]
script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args
command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0
assert Path("tests/manga-pre2.txt").is_file()

View file

@ -6,7 +6,7 @@ from mangadlp.cache import CacheDB
def test_cache_creation():
cache_file = Path("cache.json")
cache = CacheDB(cache_file, "abc", "en", "test")
CacheDB(cache_file, "abc", "en", "test")
assert cache_file.exists()
cache_file.unlink()

View file

@ -133,7 +133,7 @@ def test_metadata_chapter_validity(wait_20s):
schema = xmlschema.XMLSchema("mangadlp/metadata/ComicInfo_v2.0.xsd")
script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args
command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0
assert metadata_path.is_file()

View file

@ -56,7 +56,7 @@ def test_alt_title_fallback():
forcevol = False
test = Mangadex(url_uuid, language, forcevol)
assert test.manga_title == "Iruma à lécole des démons"
assert test.manga_title == "Iruma à lécole des démons" # noqa
def test_chapter_infos():
@ -206,7 +206,6 @@ def test_get_chapter_images():
test = Mangadex(url_uuid, language, forcevol)
img_base_url = "https://uploads.mangadex.org"
chapter_hash = "0752bc5db298beff6b932b9151dd8437"
chapter_uuid = "e86ec2c4-c5e4-4710-bfaa-7604f00939c7"
chapter_num = "1"
test_list = [
f"{img_base_url}/data/{chapter_hash}/x1-0deb4c9bfedd5be49e0a90cfb17cf343888239898c9e7451d569c0b3ea2971f4.jpg",

View file

@ -24,8 +24,3 @@ commands =
coverage erase
coverage run
coverage xml -i
[pylama]
format = pycodestyle
linters = mccabe,pycodestyle,pyflakes
ignore = E501,C901,C0301