[2.3.1] - 2023-03-12 #41

Merged
olofvndrhr merged 19 commits from dev into master 2023-03-12 04:47:37 +01:00
28 changed files with 531 additions and 450 deletions

View file

@ -26,7 +26,7 @@ pipeline:
branch: master branch: master
event: pull_request event: pull_request
commands: commands:
- python3 -m hatch build --clean - just test_build
# create release-notes # create release-notes
test-create-release-notes: test-create-release-notes:

View file

@ -26,4 +26,4 @@ pipeline:
branch: master branch: master
event: pull_request event: pull_request
commands: commands:
- python3 -m tox - just test_tox

View file

@ -29,4 +29,4 @@ pipeline:
- grep -v img2pdf contrib/requirements_dev.txt > contrib/requirements_dev_arm64.txt - grep -v img2pdf contrib/requirements_dev.txt > contrib/requirements_dev_arm64.txt
- rm -f contrib/requirements_dev.txt - rm -f contrib/requirements_dev.txt
- mv contrib/requirements_dev_arm64.txt contrib/requirements_dev.txt - mv contrib/requirements_dev_arm64.txt contrib/requirements_dev.txt
- python3 -m tox - just test_tox

View file

@ -17,51 +17,29 @@ pipeline:
image: cr.44net.ch/ci-plugins/tests image: cr.44net.ch/ci-plugins/tests
pull: true pull: true
commands: commands:
- shfmt -d -i 4 -bn -ci -sr . - just test_shfmt
# check code style - python # check code style - python
test-black: test-black:
image: cr.44net.ch/ci-plugins/tests image: cr.44net.ch/ci-plugins/tests
pull: true pull: true
commands: commands:
- python3 -m black --check --diff . - just test_black
# check imports - python
test-isort:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m isort --check-only --diff .
# check unused and missing imports - python
test-autoflake:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m autoflake --remove-all-unused-imports -r -v mangadlp/
- python3 -m autoflake --check --remove-all-unused-imports -r -v mangadlp/
# check static typing - python # check static typing - python
test-mypy: test-pyright:
image: cr.44net.ch/ci-plugins/tests image: cr.44net.ch/ci-plugins/tests
pull: true pull: true
commands: commands:
- python3 -m mypy --install-types --non-interactive mangadlp/ - just install_deps
- just test_pyright
# mccabe, pycodestyle, pyflakes tests - python # ruff test - python
test-pylama: test-ruff:
image: cr.44net.ch/ci-plugins/tests image: cr.44net.ch/ci-plugins/tests
pull: true pull: true
commands: commands:
- python3 -m pylama mangadlp/ - just test_ruff
# pylint test - python
test-pylint:
image: cr.44net.ch/ci-plugins/tests
pull: true
commands:
- python3 -m pip install -r requirements.txt
- python3 -m pylint --fail-under 9 mangadlp/
# test mkdocs generation # test mkdocs generation
test-mkdocs: test-mkdocs:
@ -72,14 +50,14 @@ pipeline:
- cd docs || exit 1 - cd docs || exit 1
- python3 -m mkdocs build --strict - python3 -m mkdocs build --strict
# test code with different python versions - python # test code with pytest - python
test-tox-pytest: test-tox-pytest:
when: when:
event: [ push ] event: [ push ]
image: cr.44net.ch/ci-plugins/tests image: cr.44net.ch/ci-plugins/tests
pull: true pull: true
commands: commands:
- python3 -m tox -e basic - just test_pytest
# generate coverage report - python # generate coverage report - python
test-tox-coverage: test-tox-coverage:
@ -89,7 +67,7 @@ pipeline:
image: cr.44net.ch/ci-plugins/tests image: cr.44net.ch/ci-plugins/tests
pull: true pull: true
commands: commands:
- python3 -m tox -e coverage - just test_coverage
# analyse code with sonarqube and upload it # analyse code with sonarqube and upload it
sonarqube-analysis: sonarqube-analysis:

View file

@ -9,6 +9,22 @@ to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- Add support for more sites - Add support for more sites
## [2.3.1] - 2023-03-12
### Added
- Added TypedDicts for type checkers and type annotation
### Fixed
- Fixed some typos in the README
### Changed
- Switched from pylint/pylama/isort/autoflake to ruff
- Switched from mypy to pyright and added strict type checking
- Updated the api template
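The TypedDicts mentioned above replace the loosely typed dicts used before. A minimal sketch of the new annotation style that pyright now checks in strict mode (field names and example values are taken from `mangadlp/types.py` and the updated api template in this PR):

```python
from typing import Dict

from mangadlp.types import ChapterData  # TypedDict added in this release

# With pyright in strict mode, the chapter-data keys and value types are
# verified statically instead of being treated as an opaque dict.
manga_chapter_data: Dict[str, ChapterData] = {
    "1": {
        "uuid": "abc",
        "volume": "1",
        "chapter": "1",
        "name": "test",
        "pages": 2,
    },
}

pages: int = manga_chapter_data["1"]["pages"]  # misspelled keys are flagged by pyright
```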
## [2.3.0] - 2023-02-15 ## [2.3.0] - 2023-02-15
### Added ### Added

View file

@ -19,31 +19,43 @@ Code Analysis
Meta Meta
[![Code style](https://img.shields.io/badge/code%20style-black-black)](https://github.com/psf/black) [![Code style](https://img.shields.io/badge/code%20style-black-black)](https://github.com/psf/black)
[![Linter](https://img.shields.io/badge/linter-pylint-yellowgreen)](https://pylint.pycqa.org/en/latest/) [![Linter](https://img.shields.io/badge/linter-ruff-red)](https://github.com/charliermarsh/ruff)
[![Types](https://img.shields.io/badge/types-mypy-blue)](https://github.com/python/mypy) [![Types](https://img.shields.io/badge/types-pyright-blue)](https://github.com/microsoft/pyright)
[![Imports](https://img.shields.io/badge/imports-isort-ef8336.svg)](https://github.com/pycqa/isort)
[![Tests](https://img.shields.io/badge/tests-pytest%20%7C%20tox-yellow)](https://github.com/pytest-dev/pytest/) [![Tests](https://img.shields.io/badge/tests-pytest%20%7C%20tox-yellow)](https://github.com/pytest-dev/pytest/)
[![Coverage](https://img.shields.io/badge/coverage-coveragepy-green)](https://github.com/nedbat/coveragepy) [![Coverage](https://img.shields.io/badge/coverage-coveragepy-green)](https://github.com/nedbat/coveragepy)
[![License](https://img.shields.io/badge/license-MIT-9400d3.svg)](https://snyk.io/learn/what-is-mit-license/) [![License](https://img.shields.io/badge/license-MIT-9400d3.svg)](https://snyk.io/learn/what-is-mit-license/)
[![Compatibility](https://img.shields.io/pypi/pyversions/manga-dlp)](https://pypi.org/project/manga-dlp/) [![Compatibility](https://img.shields.io/pypi/pyversions/manga-dlp)](https://pypi.org/project/manga-dlp/)
--- ---
## Description ## Description
A manga download script written in python. It only supports [mangadex.org](https://mangadex.org/) for now. But support A manga download script written in python. It only supports [mangadex.org](https://mangadex.org/) for now. But support
for other sites is planned. for other sites is _planned™_.
Before downloading a new chapter, the script always checks if there is already a chapter with the same name in the Before downloading a new chapter, the script always checks if there is already a chapter with the same name in the
download directory. If found the chapter is skipped. So you can run the script on a schedule to only download new download directory. If found the chapter is skipped. So you can run the script on a schedule to only download new
chapters without any additional setup. chapters without any additional setup.
The default behaviour is to pack the images to a [cbz archive](https://en.wikipedia.org/wiki/Comic_book_archive). If The default behaviour is to pack the images to a [cbz archive](https://en.wikipedia.org/wiki/Comic_book_archive). If
you just want the folder with all the pictures use the flag `--nocbz`. you just want the folder with all the pictures use the flag `--format ""`.
## _Currently_ Supported sites ## _Currently_ Supported sites
- [Mangadex.org](https://mangadex.org/) - [Mangadex.org](https://mangadex.org/)
## Features (not complete)
- Metadata support with [ComicInfo.xml](https://anansi-project.github.io/docs/comicinfo/intro)
- Json caching
- Custom hooks after/before each download
- Custom chapter name format
- Volume support
- Multiple archive formats supported (cbz,cbr,zip,none)
- Language selection
- Download all chapters directly
- And others...
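As a rough sketch of how the script is invoked (mirroring the command the test suite in this PR builds; the URL, path and format values below are placeholders, not project defaults):

```python
import subprocess

# Same flags as used by the test suite in this PR; values are placeholders only.
command_args = [
    "-u", "https://mangadex.org/title/<manga-uuid>",
    "-l", "en",
    "--path", "downloads",
    "--format", "cbz",
    "--debug",
]
subprocess.call(["python3", "manga-dlp.py", *command_args])
```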
## Usage ## Usage
### Quick start ### Quick start
@ -124,18 +136,18 @@ verbosity: [mutually_exclusive]
For suggestions for improvement, just open a pull request. For suggestions for improvement, just open a pull request.
If you want to add support for a new site, there is an api [template file](./contrib/api_template.py) which you can use. If you want to add support for a new site, there is an api [template file](contrib/api_template.py) which you can use.
And more infos and tools in the contrib [README.md](contrib/README.md) And more infos and tools are in the contrib [README.md](contrib/README.md)
Otherwise, you can open am issue with the name of the site which you want support for. (not guaranteed to be Otherwise, you can open an issue with the name of the site which you want support for (not guaranteed to be
implemented) implemented).
If you encounter any bugs, also just open an issue with a description of the problem. If you encounter any bugs, also just open an issue with a description of the problem.
## TODO's ## TODO's
- <del>Make docker container for easy distribution</del> - <del>Make docker container for easy distribution</del>
--> [Dockerhub](https://hub.docker.com/repository/docker/olofvndrhr/manga-dlp) --> [Dockerhub](https://hub.docker.com/r/olofvndrhr/manga-dlp)
- <del>Automate release</del> - <del>Automate release</del>
--> Done with woodpecker-ci --> Done with woodpecker-ci
- <del>Make pypi package</del> - <del>Make pypi package</del>

View file

@ -1,9 +1,14 @@
from typing import Dict, List, Union
from mangadlp.types import ChapterData, ComicInfo
# api template for manga-dlp # api template for manga-dlp
class YourAPI: class YourAPI:
"""Your API Class. """Your API Class.
Get infos for a manga from example.org
Get infos for a manga from example.org.
Args: Args:
url_uuid (str): URL or UUID of the manga url_uuid (str): URL or UUID of the manga
@ -22,10 +27,8 @@ class YourAPI:
api_base_url = "https://api.mangadex.org" api_base_url = "https://api.mangadex.org"
img_base_url = "https://uploads.mangadex.org" img_base_url = "https://uploads.mangadex.org"
def __init__(self, url_uuid, language, forcevol): def __init__(self, url_uuid: str, language: str, forcevol: bool):
""" """get infos to initiate class."""
get infos to initiate class
"""
self.api_name = "Your API Name" self.api_name = "Your API Name"
self.url_uuid = url_uuid self.url_uuid = url_uuid
@ -36,22 +39,24 @@ class YourAPI:
self.manga_uuid = "abc" self.manga_uuid = "abc"
self.manga_title = "abc" self.manga_title = "abc"
self.chapter_list = ["1", "2", "2.1", "5", "10"] self.chapter_list = ["1", "2", "2.1", "5", "10"]
self.manga_chapter_data = { # example data self.manga_chapter_data: Dict[str, ChapterData] = { # example data
"1": { "1": {
"uuid": "abc", "uuid": "abc",
"volume": "1", "volume": "1",
"chapter": "1", "chapter": "1",
"name": "test", "name": "test",
"pages" 2,
}, },
"2": { "2": {
"uuid": "abc", "uuid": "abc",
"volume": "1", "volume": "1",
"chapter": "2", "chapter": "2",
"name": "test", "name": "test",
"pages": 45,
}, },
} }
# or with --forcevol # or with --forcevol
self.manga_chapter_data = { self.manga_chapter_data: Dict[str, ChapterData] = {
"1:1": { "1:1": {
"uuid": "abc", "uuid": "abc",
"volume": "1", "volume": "1",
@ -66,9 +71,8 @@ class YourAPI:
}, },
} }
def get_chapter_images(chapter: str, download_wait: float) -> list: def get_chapter_images(self, chapter: str, wait_time: float) -> List[str]:
""" """Get chapter images as a list (full links).
Get chapter images as a list (full links)
Args: Args:
chapter: The chapter number (chapter data index) chapter: The chapter number (chapter data index)
@ -77,7 +81,6 @@ class YourAPI:
Returns: Returns:
The list of urls of the page images The list of urls of the page images
""" """
# example # example
return [ return [
"https://abc.def/image/123.png", "https://abc.def/image/123.png",
@ -85,10 +88,10 @@ class YourAPI:
"https://abc.def/image/12345.png", "https://abc.def/image/12345.png",
] ]
def create_metadata(self, chapter: str) -> dict: def create_metadata(self, chapter: str) -> ComicInfo:
""" """Get metadata with correct keys for ComicInfo.xml.
Get metadata with correct keys for ComicInfo.xml
Provide as much metadata as possible. empty/false values will be ignored Provide as much metadata as possible. empty/false values will be ignored.
Args: Args:
chapter: The chapter number (chapter data index) chapter: The chapter number (chapter data index)
@ -96,7 +99,6 @@ class YourAPI:
Returns: Returns:
The metadata as a dict The metadata as a dict
""" """
# metadata types. have to be valid # metadata types. have to be valid
# {key: (type, default value, valid values)} # {key: (type, default value, valid values)}
{ {
@ -155,7 +157,7 @@ class YourAPI:
# example # example
return { return {
"Volume": "abc", "Volume": 1,
"LanguageISO": "en", "LanguageISO": "en",
"Title": "test", "Title": "test",
} }

View file

@ -14,9 +14,7 @@ hatchling>=1.11.0
pytest>=7.0.0 pytest>=7.0.0
coverage>=6.3.1 coverage>=6.3.1
black>=22.1.0 black>=22.1.0
isort>=5.10.0
pylint>=2.13.0
mypy>=0.940 mypy>=0.940
tox>=3.24.5 tox>=3.24.5
autoflake>=1.4 ruff>=0.0.247
pylama>=8.3.8 pyright>=1.1.294

View file

@ -17,31 +17,43 @@ Code Analysis
Meta Meta
[![Code style](https://img.shields.io/badge/code%20style-black-black)](https://github.com/psf/black) [![Code style](https://img.shields.io/badge/code%20style-black-black)](https://github.com/psf/black)
[![Linter](https://img.shields.io/badge/linter-pylint-yellowgreen)](https://pylint.pycqa.org/en/latest/) [![Linter](https://img.shields.io/badge/linter-ruff-red)](https://github.com/charliermarsh/ruff)
[![Types](https://img.shields.io/badge/types-mypy-blue)](https://github.com/python/mypy) [![Types](https://img.shields.io/badge/types-pyright-blue)](https://github.com/microsoft/pyright)
[![Imports](https://img.shields.io/badge/imports-isort-ef8336.svg)](https://github.com/pycqa/isort)
[![Tests](https://img.shields.io/badge/tests-pytest%20%7C%20tox-yellow)](https://github.com/pytest-dev/pytest/) [![Tests](https://img.shields.io/badge/tests-pytest%20%7C%20tox-yellow)](https://github.com/pytest-dev/pytest/)
[![Coverage](https://img.shields.io/badge/coverage-coveragepy-green)](https://github.com/nedbat/coveragepy) [![Coverage](https://img.shields.io/badge/coverage-coveragepy-green)](https://github.com/nedbat/coveragepy)
[![License](https://img.shields.io/badge/license-MIT-9400d3.svg)](https://snyk.io/learn/what-is-mit-license/) [![License](https://img.shields.io/badge/license-MIT-9400d3.svg)](https://snyk.io/learn/what-is-mit-license/)
[![Compatibility](https://img.shields.io/pypi/pyversions/manga-dlp)](https://pypi.org/project/manga-dlp/) [![Compatibility](https://img.shields.io/pypi/pyversions/manga-dlp)](https://pypi.org/project/manga-dlp/)
--- ---
## Description ## Description
A manga download script written in python. It only supports [mangadex.org](https://mangadex.org/) for now. But support A manga download script written in python. It only supports [mangadex.org](https://mangadex.org/) for now. But support
for other sites is planned. for other sites is _planned™_.
Before downloading a new chapter, the script always checks if there is already a chapter with the same name in the Before downloading a new chapter, the script always checks if there is already a chapter with the same name in the
download directory. If found the chapter is skipped. So you can run the script on a schedule to only download new download directory. If found the chapter is skipped. So you can run the script on a schedule to only download new
chapters without any additional setup. chapters without any additional setup.
The default behaviour is to pack the images to a [cbz archive](https://en.wikipedia.org/wiki/Comic_book_archive). If The default behaviour is to pack the images to a [cbz archive](https://en.wikipedia.org/wiki/Comic_book_archive). If
you just want the folder with all the pictures use the flag `--nocbz`. you just want the folder with all the pictures use the flag `--format ""`.
## _Currently_ Supported sites ## _Currently_ Supported sites
- [Mangadex.org](https://mangadex.org/) - [Mangadex.org](https://mangadex.org/)
## Features (not complete)
- Metadata support with [ComicInfo.xml](https://anansi-project.github.io/docs/comicinfo/intro)
- Json caching
- Custom hooks after/before each download
- Custom chapter name format
- Volume support
- Multiple archive formats supported (cbz,cbr,zip,none)
- Language selection
- Download all chapters directly
- And others...
## Usage ## Usage
### Quick start ### Quick start
@ -82,7 +94,7 @@ mangadlp <args> # call script directly
### With docker ### With docker
See the docker [README](docker/) See the docker [README](https://manga-dlp.ivn.sh/docker/)
## Options ## Options
@ -122,20 +134,18 @@ verbosity: [mutually_exclusive]
For suggestions for improvement, just open a pull request. For suggestions for improvement, just open a pull request.
If you want to add support for a new site, there is an If you want to add support for a new site, there is an api [template file](https://github.com/olofvndrhr/manga-dlp/tree/master/contrib/api_template.py) which you can use.
api [template file](https://github.com/olofvndrhr/manga-dlp/blob/master/contrib/api_template.py) which you can use. And more infos and tools are in the contrib [README.md](https://github.com/olofvndrhr/manga-dlp/tree/master/contrib/README.md)
And more infos and tools in the
contrib [README.md](https://github.com/olofvndrhr/manga-dlp/blob/master/contrib/README.md)
Otherwise, you can open am issue with the name of the site which you want support for. (not guaranteed to be Otherwise, you can open an issue with the name of the site which you want support for (not guaranteed to be
implemented) implemented).
If you encounter any bugs, also just open an issue with a description of the problem. If you encounter any bugs, also just open an issue with a description of the problem.
## TODO's ## TODO's
- <del>Make docker container for easy distribution</del> - <del>Make docker container for easy distribution</del>
--> [Dockerhub](https://hub.docker.com/repository/docker/olofvndrhr/manga-dlp) --> [Dockerhub](https://hub.docker.com/r/olofvndrhr/manga-dlp)
- <del>Automate release</del> - <del>Automate release</del>
--> Done with woodpecker-ci --> Done with woodpecker-ci
- <del>Make pypi package</del> - <del>Make pypi package</del>

View file

@ -68,45 +68,39 @@ create_venv:
@python3 -m venv venv @python3 -m venv venv
install_deps: install_deps:
@echo "installing dependencies"
@pip3 install -r requirements.txt
install_deps_dev:
@echo "installing dependencies" @echo "installing dependencies"
@pip3 install -r contrib/requirements_dev.txt @pip3 install -r contrib/requirements_dev.txt
test_shfmt: test_shfmt:
@find . -type f \( -name "**.sh" -and -not -path "./venv/*" -and -not -path "./.tox/*" \) -exec shfmt -d -i 4 -bn -ci -sr "{}" \+; @find . -type f \( -name "**.sh" -and -not -path "./.**" -and -not -path "./venv**" \) -exec shfmt -d -i 4 -bn -ci -sr "{}" \+;
test_black: test_black:
@python3 -m black --check --diff . @python3 -m black --check --diff mangadlp/
test_isort: test_pyright:
@python3 -m isort --check-only --diff . @python3 -m pyright mangadlp/
test_mypy: test_ruff:
@python3 -m mypy --install-types --non-interactive mangadlp/ @python3 -m ruff --diff mangadlp/
test_ci_conf:
@woodpecker-cli lint .woodpecker/
test_pytest: test_pytest:
@python3 -m tox -e basic @python3 -m tox -e basic
test_autoflake: test_coverage:
@python3 -m autoflake --remove-all-unused-imports -r -v mangadlp/ @python3 -m tox -e coverage
@python3 -m autoflake --check --remove-all-unused-imports -r -v mangadlp/
test_pylama:
@python3 -m pylama --options tox.ini mangadlp/
test_pylint:
@python3 -m pylint --fail-under 9 mangadlp/
test_tox: test_tox:
@python3 -m tox @python3 -m tox
test_tox_coverage:
@python3 -m tox -e coverage
test_build: test_build:
@python3 -m hatch build @python3 -m hatch build --clean
test_ci_conf:
@woodpecker-cli lint .woodpecker/
test_docker_build: test_docker_build:
@docker build . -f docker/Dockerfile.amd64 -t manga-dlp:test @docker build . -f docker/Dockerfile.amd64 -t manga-dlp:test
@ -123,11 +117,8 @@ lint:
-just test_ci_conf -just test_ci_conf
just test_shfmt just test_shfmt
just test_black just test_black
just test_isort just test_pyright
just test_mypy just test_ruff
just test_autoflake
just test_pylama
just test_pylint
@echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n" @echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n"
tests: tests:
@ -135,11 +126,8 @@ tests:
-just test_ci_conf -just test_ci_conf
just test_shfmt just test_shfmt
just test_black just test_black
just test_isort just test_pyright
just test_mypy just test_ruff
just test_autoflake
just test_pylama
just test_pylint
just test_pytest just test_pytest
@echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n" @echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n"
@ -148,13 +136,10 @@ tests_full:
-just test_ci_conf -just test_ci_conf
just test_shfmt just test_shfmt
just test_black just test_black
just test_isort just test_pyright
just test_mypy just test_ruff
just test_autoflake
just test_pylama
just test_pylint
just test_build just test_build
just test_tox just test_tox
just test_tox_coverage just test_coverage
just test_docker_build just test_docker_build
@echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n" @echo -e "\n\033[0;32m=== ALL DONE ===\033[0m\n"

View file

@ -1 +1 @@
__version__ = "2.3.0" __version__ = "2.3.1"

View file

@ -1,15 +1,18 @@
import re import re
from time import sleep from time import sleep
from typing import Any, Dict, List
import requests import requests
from loguru import logger as log from loguru import logger as log
from mangadlp import utils from mangadlp import utils
from mangadlp.types import ChapterData, ComicInfo
class Mangadex: class Mangadex:
"""Mangadex API Class. """Mangadex API Class.
Get infos for a manga from mangadex.org
Get infos for a manga from mangadex.org.
Args: Args:
url_uuid (str): URL or UUID of the manga url_uuid (str): URL or UUID of the manga
@ -64,10 +67,10 @@ class Mangadex:
log.error("No valid UUID found") log.error("No valid UUID found")
raise exc raise exc
return uuid return uuid # pyright:ignore
# make initial request # make initial request
def get_manga_data(self) -> dict: def get_manga_data(self) -> Dict[str, Any]:
log.debug(f"Getting manga data for: {self.manga_uuid}") log.debug(f"Getting manga data for: {self.manga_uuid}")
counter = 1 counter = 1
while counter <= 3: while counter <= 3:
@ -84,12 +87,14 @@ class Mangadex:
counter += 1 counter += 1
else: else:
break break
response_body: Dict[str, Dict[str, Any]] = response.json() # pyright:ignore
# check if manga exists # check if manga exists
if response.json()["result"] != "ok": if response_body["result"] != "ok": # type:ignore
log.error("Manga not found") log.error("Manga not found")
raise KeyError raise KeyError
return response.json()["data"] return response_body["data"]
# get the title of the manga (and fix the filename) # get the title of the manga (and fix the filename)
def get_manga_title(self) -> str: def get_manga_title(self) -> str:
@ -111,7 +116,7 @@ class Mangadex:
if item.get(self.language): if item.get(self.language):
alt_title = item alt_title = item
break break
title = alt_title[self.language] title = alt_title[self.language] # pyright:ignore
except (KeyError, UnboundLocalError): except (KeyError, UnboundLocalError):
log.warning( log.warning(
"Manga title also not found in alt titles. Falling back to english title" "Manga title also not found in alt titles. Falling back to english title"
@ -132,7 +137,7 @@ class Mangadex:
timeout=10, timeout=10,
) )
try: try:
total_chapters = r.json()["total"] total_chapters: int = r.json()["total"]
except Exception as exc: except Exception as exc:
log.error( log.error(
"Error retrieving the chapters list. Did you specify a valid language code?" "Error retrieving the chapters list. Did you specify a valid language code?"
@ -146,13 +151,13 @@ class Mangadex:
return total_chapters return total_chapters
# get chapter data like name, uuid etc # get chapter data like name, uuid etc
def get_chapter_data(self) -> dict: def get_chapter_data(self) -> Dict[str, ChapterData]:
log.debug(f"Getting chapter data for: {self.manga_uuid}") log.debug(f"Getting chapter data for: {self.manga_uuid}")
api_sorting = "order[chapter]=asc&order[volume]=asc" api_sorting = "order[chapter]=asc&order[volume]=asc"
# check for chapters in specified lang # check for chapters in specified lang
total_chapters = self.check_chapter_lang() total_chapters = self.check_chapter_lang()
chapter_data = {} chapter_data: dict[str, ChapterData] = {}
last_volume, last_chapter = ("", "") last_volume, last_chapter = ("", "")
offset = 0 offset = 0
while offset < total_chapters: # if more than 500 chapters while offset < total_chapters: # if more than 500 chapters
@ -160,8 +165,9 @@ class Mangadex:
f"{self.api_base_url}/manga/{self.manga_uuid}/feed?{api_sorting}&limit=500&offset={offset}&{self.api_additions}", f"{self.api_base_url}/manga/{self.manga_uuid}/feed?{api_sorting}&limit=500&offset={offset}&{self.api_additions}",
timeout=10, timeout=10,
) )
for chapter in r.json()["data"]: response_body: Dict[str, Any] = r.json()
attributes: dict = chapter["attributes"] for chapter in response_body["data"]:
attributes: Dict[str, Any] = chapter["attributes"]
# chapter infos from feed # chapter infos from feed
chapter_num: str = attributes.get("chapter") or "" chapter_num: str = attributes.get("chapter") or ""
chapter_vol: str = attributes.get("volume") or "" chapter_vol: str = attributes.get("volume") or ""
@ -203,7 +209,7 @@ class Mangadex:
return chapter_data return chapter_data
# get images for the chapter (mangadex@home) # get images for the chapter (mangadex@home)
def get_chapter_images(self, chapter: str, wait_time: float) -> list: def get_chapter_images(self, chapter: str, wait_time: float) -> List[str]:
log.debug(f"Getting chapter images for: {self.manga_uuid}") log.debug(f"Getting chapter images for: {self.manga_uuid}")
athome_url = f"{self.api_base_url}/at-home/server" athome_url = f"{self.api_base_url}/at-home/server"
chapter_uuid = self.manga_chapter_data[chapter]["uuid"] chapter_uuid = self.manga_chapter_data[chapter]["uuid"]
@ -237,11 +243,11 @@ class Mangadex:
if api_error: if api_error:
return [] return []
chapter_hash = api_data["chapter"]["hash"] chapter_hash = api_data["chapter"]["hash"] # pyright:ignore
chapter_img_data = api_data["chapter"]["data"] chapter_img_data = api_data["chapter"]["data"] # pyright:ignore
# get list of image urls # get list of image urls
image_urls = [] image_urls: List[str] = []
for image in chapter_img_data: for image in chapter_img_data:
image_urls.append(f"{self.img_base_url}/data/{chapter_hash}/{image}") image_urls.append(f"{self.img_base_url}/data/{chapter_hash}/{image}")
@ -250,9 +256,9 @@ class Mangadex:
return image_urls return image_urls
# create list of chapters # create list of chapters
def create_chapter_list(self) -> list: def create_chapter_list(self) -> List[str]:
log.debug(f"Creating chapter list for: {self.manga_uuid}") log.debug(f"Creating chapter list for: {self.manga_uuid}")
chapter_list = [] chapter_list: List[str] = []
for data in self.manga_chapter_data.values(): for data in self.manga_chapter_data.values():
chapter_number: str = data["chapter"] chapter_number: str = data["chapter"]
volume_number: str = data["volume"] volume_number: str = data["volume"]
@ -263,15 +269,15 @@ class Mangadex:
return chapter_list return chapter_list
def create_metadata(self, chapter: str) -> dict: def create_metadata(self, chapter: str) -> ComicInfo:
log.info("Creating metadata from api") log.info("Creating metadata from api")
chapter_data = self.manga_chapter_data[chapter] chapter_data = self.manga_chapter_data[chapter]
try: try:
volume = int(chapter_data.get("volume")) volume = int(chapter_data["volume"])
except (ValueError, TypeError): except (ValueError, TypeError):
volume = None volume = None
metadata = { metadata: ComicInfo = {
"Volume": volume, "Volume": volume,
"Number": chapter_data.get("chapter"), "Number": chapter_data.get("chapter"),
"PageCount": chapter_data.get("pages"), "PageCount": chapter_data.get("pages"),

View file

@ -1,7 +1,7 @@
import re import re
import shutil import shutil
from pathlib import Path from pathlib import Path
from typing import Any, Union from typing import Any, Dict, List, Tuple, Union
from loguru import logger as log from loguru import logger as log
@ -10,11 +10,12 @@ from mangadlp.api.mangadex import Mangadex
from mangadlp.cache import CacheDB from mangadlp.cache import CacheDB
from mangadlp.hooks import run_hook from mangadlp.hooks import run_hook
from mangadlp.metadata import write_metadata from mangadlp.metadata import write_metadata
from mangadlp.types import ChapterData
from mangadlp.utils import get_file_format from mangadlp.utils import get_file_format
def match_api(url_uuid: str) -> type: def match_api(url_uuid: str) -> type:
"""Match the correct api class from a string """Match the correct api class from a string.
Args: Args:
url_uuid: url/uuid to check url_uuid: url/uuid to check
@ -22,9 +23,8 @@ def match_api(url_uuid: str) -> type:
Returns: Returns:
The class of the API to use The class of the API to use
""" """
# apis to check # apis to check
apis: list[tuple[str, re.Pattern, type]] = [ apis: List[Tuple[str, re.Pattern[str], type]] = [
( (
"mangadex.org", "mangadex.org",
re.compile( re.compile(
@ -53,6 +53,7 @@ def match_api(url_uuid: str) -> type:
class MangaDLP: class MangaDLP:
"""Download Mangas from supported sites. """Download Mangas from supported sites.
After initialization, start the script with the function get_manga(). After initialization, start the script with the function get_manga().
Args: Args:
@ -108,7 +109,7 @@ class MangaDLP:
self.chapter_post_hook_cmd = chapter_post_hook_cmd self.chapter_post_hook_cmd = chapter_post_hook_cmd
self.cache_path = cache_path self.cache_path = cache_path
self.add_metadata = add_metadata self.add_metadata = add_metadata
self.hook_infos: dict = {} self.hook_infos: Dict[str, Any] = {}
# prepare everything # prepare everything
self._prepare() self._prepare()
@ -226,7 +227,7 @@ class MangaDLP:
skipped_chapters: list[Any] = [] skipped_chapters: list[Any] = []
error_chapters: list[Any] = [] error_chapters: list[Any] = []
for chapter in chapters_to_download: for chapter in chapters_to_download:
if self.cache_path and chapter in cached_chapters: if self.cache_path and chapter in cached_chapters: # pyright:ignore
log.info(f"Chapter '{chapter}' is in cache. Skipping download") log.info(f"Chapter '{chapter}' is in cache. Skipping download")
continue continue
@ -240,7 +241,7 @@ class MangaDLP:
skipped_chapters.append(chapter) skipped_chapters.append(chapter)
# update cache # update cache
if self.cache_path: if self.cache_path:
cache.add_chapter(chapter) cache.add_chapter(chapter) # pyright:ignore
continue continue
except Exception: except Exception:
# skip download/packing due to an error # skip download/packing due to an error
@ -273,7 +274,7 @@ class MangaDLP:
# update cache # update cache
if self.cache_path: if self.cache_path:
cache.add_chapter(chapter) cache.add_chapter(chapter) # pyright:ignore
# start chapter post hook # start chapter post hook
run_hook( run_hook(
@ -310,7 +311,7 @@ class MangaDLP:
# once called per chapter # once called per chapter
def get_chapter(self, chapter: str) -> Path: def get_chapter(self, chapter: str) -> Path:
# get chapter infos # get chapter infos
chapter_infos: dict = self.api.manga_chapter_data[chapter] chapter_infos: ChapterData = self.api.manga_chapter_data[chapter]
log.debug(f"Chapter infos: {chapter_infos}") log.debug(f"Chapter infos: {chapter_infos}")
# get image urls for chapter # get image urls for chapter
@ -352,7 +353,7 @@ class MangaDLP:
log.debug(f"Filename: '{chapter_filename}'") log.debug(f"Filename: '{chapter_filename}'")
# set download path for chapter (image folder) # set download path for chapter (image folder)
chapter_path = self.manga_path / chapter_filename chapter_path: Path = self.manga_path / chapter_filename
# set archive path with file format # set archive path with file format
chapter_archive_path = Path(f"{chapter_path}{self.file_format}") chapter_archive_path = Path(f"{chapter_path}{self.file_format}")

View file

@ -26,12 +26,14 @@ class CacheDB:
if not self.db_data.get(self.db_key): if not self.db_data.get(self.db_key):
self.db_data[self.db_key] = {} self.db_data[self.db_key] = {}
self.db_uuid_data: dict = self.db_data[self.db_key] self.db_uuid_data = self.db_data[self.db_key]
if not self.db_uuid_data.get("name"): if not self.db_uuid_data.get("name"):
self.db_uuid_data.update({"name": self.name}) self.db_uuid_data.update({"name": self.name})
self._write_db() self._write_db()
self.db_uuid_chapters: list = self.db_uuid_data.get("chapters") or [] self.db_uuid_chapters: List[str] = (
self.db_uuid_data.get("chapters") or [] # type:ignore
)
def _prepare_db(self) -> None: def _prepare_db(self) -> None:
if self.db_path.exists(): if self.db_path.exists():
@ -44,11 +46,11 @@ class CacheDB:
log.error("Can't create db-file") log.error("Can't create db-file")
raise exc raise exc
def _read_db(self) -> Dict[str, dict]: def _read_db(self) -> Dict[str, Dict[str, Union[str, List[str]]]]:
log.info(f"Reading cache-db: {self.db_path}") log.info(f"Reading cache-db: {self.db_path}")
try: try:
db_txt = self.db_path.read_text(encoding="utf8") db_txt = self.db_path.read_text(encoding="utf8")
db_dict: dict[str, dict] = json.loads(db_txt) db_dict: Dict[str, Dict[str, Union[str, List[str]]]] = json.loads(db_txt)
except Exception as exc: except Exception as exc:
log.error("Can't load cache-db") log.error("Can't load cache-db")
raise exc raise exc
@ -73,7 +75,7 @@ class CacheDB:
raise exc raise exc
def sort_chapters(chapters: list) -> List[str]: def sort_chapters(chapters: List[str]) -> List[str]:
try: try:
sorted_list = sorted(chapters, key=float) sorted_list = sorted(chapters, key=float)
except Exception: except Exception:

View file

@ -1,5 +1,6 @@
import sys import sys
from pathlib import Path from pathlib import Path
from typing import Any, List
import click import click
from click_option_group import ( from click_option_group import (
@ -15,7 +16,7 @@ from mangadlp.logger import prepare_logger
# read in the list of links from a file # read in the list of links from a file
def readin_list(_ctx, _param, value) -> list: def readin_list(_ctx: click.Context, _param: str, value: str) -> List[str]:
if not value: if not value:
return [] return []
@ -38,8 +39,8 @@ def readin_list(_ctx, _param, value) -> list:
@click.help_option() @click.help_option()
@click.version_option(version=__version__, package_name="manga-dlp") @click.version_option(version=__version__, package_name="manga-dlp")
# manga selection # manga selection
@optgroup.group("source", cls=RequiredMutuallyExclusiveOptionGroup) @optgroup.group("source", cls=RequiredMutuallyExclusiveOptionGroup) # type: ignore
@optgroup.option( @optgroup.option( # type: ignore
"-u", "-u",
"--url", "--url",
"--uuid", "--uuid",
@ -49,19 +50,19 @@ def readin_list(_ctx, _param, value) -> list:
show_default=True, show_default=True,
help="URL or UUID of the manga", help="URL or UUID of the manga",
) )
@optgroup.option( @optgroup.option( # type: ignore
"--read", "--read",
"read_mangas", "read_mangas",
is_eager=True, is_eager=True,
callback=readin_list, callback=readin_list,
type=click.Path(exists=True, dir_okay=False), type=click.Path(exists=True, dir_okay=False, path_type=str),
default=None, default=None,
show_default=True, show_default=True,
help="Path of file with manga links to download. One per line", help="Path of file with manga links to download. One per line",
) )
# logging options # logging options
@optgroup.group("verbosity", cls=MutuallyExclusiveOptionGroup) @optgroup.group("verbosity", cls=MutuallyExclusiveOptionGroup) # type: ignore
@optgroup.option( @optgroup.option( # type: ignore
"--loglevel", "--loglevel",
"verbosity", "verbosity",
type=int, type=int,
@ -69,7 +70,7 @@ def readin_list(_ctx, _param, value) -> list:
show_default=True, show_default=True,
help="Custom log level", help="Custom log level",
) )
@optgroup.option( @optgroup.option( # type: ignore
"--warn", "--warn",
"verbosity", "verbosity",
flag_value=30, flag_value=30,
@ -77,7 +78,7 @@ def readin_list(_ctx, _param, value) -> list:
show_default=True, show_default=True,
help="Only log warnings and higher", help="Only log warnings and higher",
) )
@optgroup.option( @optgroup.option( # type: ignore
"--debug", "--debug",
"verbosity", "verbosity",
flag_value=10, flag_value=10,
@ -227,12 +228,8 @@ def readin_list(_ctx, _param, value) -> list:
help="Enable/disable creation of metadata via ComicInfo.xml", help="Enable/disable creation of metadata via ComicInfo.xml",
) )
@click.pass_context @click.pass_context
def main(ctx: click.Context, **kwargs) -> None: def main(ctx: click.Context, **kwargs: Any) -> None:
""" """Script to download mangas from various sites."""
Script to download mangas from various sites
"""
url_uuid: str = kwargs.pop("url_uuid") url_uuid: str = kwargs.pop("url_uuid")
read_mangas: list[str] = kwargs.pop("read_mangas") read_mangas: list[str] = kwargs.pop("read_mangas")
verbosity: int = kwargs.pop("verbosity") verbosity: int = kwargs.pop("verbosity")

View file

@ -2,7 +2,7 @@ import logging
import shutil import shutil
from pathlib import Path from pathlib import Path
from time import sleep from time import sleep
from typing import Union from typing import List, Union
import requests import requests
from loguru import logger as log from loguru import logger as log
@ -12,7 +12,7 @@ from mangadlp import utils
# download images # download images
def download_chapter( def download_chapter(
image_urls: list, image_urls: List[str],
chapter_path: Union[str, Path], chapter_path: Union[str, Path],
download_wait: float, download_wait: float,
) -> None: ) -> None:
@ -48,8 +48,8 @@ def download_chapter(
# write image # write image
try: try:
with image_path.open("wb") as file: with image_path.open("wb") as file:
r.raw.decode_content = True r.raw.decode_content = True # pyright:ignore
shutil.copyfileobj(r.raw, file) shutil.copyfileobj(r.raw, file) # pyright:ignore
except Exception as exc: except Exception as exc:
log.error("Can't write file") log.error("Can't write file")
raise exc raise exc

View file

@ -1,11 +1,15 @@
import os import os
import subprocess import subprocess
from typing import Any
from loguru import logger as log from loguru import logger as log
def run_hook(command: str, hook_type: str, **kwargs) -> int: def run_hook(command: str, hook_type: str, **kwargs: Any) -> int:
""" """Run a command.
Run a command with subprocess.run and add kwargs to the environment.
Args: Args:
command (str): command to run command (str): command to run
hook_type (str): type of the hook hook_type (str): type of the hook
@ -14,7 +18,6 @@ def run_hook(command: str, hook_type: str, **kwargs) -> int:
Returns: Returns:
exit_code (int): exit code of command exit_code (int): exit code of command
""" """
# check if hook commands are empty # check if hook commands are empty
if not command or command == "None": if not command or command == "None":
log.debug(f"Hook '{hook_type}' empty. Not running") log.debug(f"Hook '{hook_type}' empty. Not running")

View file

@ -1,5 +1,6 @@
import logging import logging
import sys import sys
from typing import Any, Dict
from loguru import logger from loguru import logger
@ -8,11 +9,9 @@ LOGURU_FMT = "{time:%Y-%m-%dT%H:%M:%S%z} | <level>[{level: <7}]</level> [{name:
# from loguru docs # from loguru docs
class InterceptHandler(logging.Handler): class InterceptHandler(logging.Handler):
""" """Intercept python logging messages and log them via loguru.logger."""
Intercept python logging messages and log them via loguru.logger
"""
def emit(self, record): def emit(self, record: Any) -> None:
# Get corresponding Loguru level if it exists # Get corresponding Loguru level if it exists
try: try:
level = logger.level(record.levelname).name level = logger.level(record.levelname).name
@ -21,8 +20,8 @@ class InterceptHandler(logging.Handler):
# Find caller from where originated the logged message # Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2 frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__: while frame.f_code.co_filename == logging.__file__: # pyright:ignore
frame = frame.f_back frame = frame.f_back # type: ignore
depth += 1 depth += 1
logger.opt(depth=depth, exception=record.exc_info).log( logger.opt(depth=depth, exception=record.exc_info).log(
@ -32,7 +31,7 @@ class InterceptHandler(logging.Handler):
# init logger with format and log level # init logger with format and log level
def prepare_logger(loglevel: int = 20) -> None: def prepare_logger(loglevel: int = 20) -> None:
config: dict = { config: Dict[str, Any] = {
"handlers": [ "handlers": [
{ {
"sink": sys.stdout, "sink": sys.stdout,

View file

@ -1,14 +1,18 @@
from pathlib import Path from pathlib import Path
from typing import Any, Dict, Tuple from typing import Any, Dict, List, Tuple, Union
import xmltodict import xmltodict
from loguru import logger as log from loguru import logger as log
from mangadlp.types import ComicInfo
METADATA_FILENAME = "ComicInfo.xml" METADATA_FILENAME = "ComicInfo.xml"
METADATA_TEMPLATE = Path("mangadlp/metadata/ComicInfo_v2.0.xml") METADATA_TEMPLATE = Path("mangadlp/metadata/ComicInfo_v2.0.xml")
# define metadata types, defaults and valid values. an empty list means no value check # define metadata types, defaults and valid values. an empty list means no value check
# {key: (type, default value, valid values)} # {key: (type, default value, valid values)}
METADATA_TYPES: Dict[str, Tuple[type, Any, list]] = { METADATA_TYPES: Dict[
str, Tuple[Any, Union[str, int, None], List[Union[str, int, None]]]
] = {
"Title": (str, None, []), "Title": (str, None, []),
"Series": (str, None, []), "Series": (str, None, []),
"Number": (str, None, []), "Number": (str, None, []),
@ -59,10 +63,10 @@ METADATA_TYPES: Dict[str, Tuple[type, Any, list]] = {
} }
def validate_metadata(metadata_in: dict) -> Dict[str, dict]: def validate_metadata(metadata_in: ComicInfo) -> Dict[str, ComicInfo]:
log.info("Validating metadata") log.info("Validating metadata")
metadata_valid: dict[str, dict] = {"ComicInfo": {}} metadata_valid: dict[str, ComicInfo] = {"ComicInfo": {}}
for key, value in METADATA_TYPES.items(): for key, value in METADATA_TYPES.items():
metadata_type, metadata_default, metadata_validation = value metadata_type, metadata_default, metadata_validation = value
@ -75,7 +79,7 @@ def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
# check if metadata key is available # check if metadata key is available
try: try:
md_to_check = metadata_in[key] md_to_check: Union[str, int, None] = metadata_in[key]
except KeyError: except KeyError:
continue continue
# check if provided metadata item is empty # check if provided metadata item is empty
@ -84,7 +88,7 @@ def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
# check if metadata type is correct # check if metadata type is correct
log.debug(f"Key:{key} -> value={type(md_to_check)} -> check={metadata_type}") log.debug(f"Key:{key} -> value={type(md_to_check)} -> check={metadata_type}")
if not isinstance(md_to_check, metadata_type): # noqa if not isinstance(md_to_check, metadata_type):
log.warning( log.warning(
f"Metadata has wrong type: {key}:{metadata_type} -> {md_to_check}" f"Metadata has wrong type: {key}:{metadata_type} -> {md_to_check}"
) )
@ -104,8 +108,8 @@ def validate_metadata(metadata_in: dict) -> Dict[str, dict]:
return metadata_valid return metadata_valid
def write_metadata(chapter_path: Path, metadata: dict) -> None: def write_metadata(chapter_path: Path, metadata: ComicInfo) -> None:
if metadata["Format"] == "pdf": if metadata["Format"] == "pdf": # pyright:ignore
log.warning("Can't add metadata for pdf format. Skipping") log.warning("Can't add metadata for pdf format. Skipping")
return return
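A quick sketch of the validation flow shown above, using the example metadata returned by `create_metadata()` in the api template earlier in this PR:

```python
from mangadlp.metadata import validate_metadata
from mangadlp.types import ComicInfo

# example metadata, matching the sample return of create_metadata() in the api template
metadata: ComicInfo = {"Volume": 1, "LanguageISO": "en", "Title": "test"}

# entries with wrong types or invalid values are dropped; the result is
# wrapped under the "ComicInfo" root key before being serialized to XML
valid = validate_metadata(metadata)  # -> {"ComicInfo": {...}} with only valid entries
```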

mangadlp/types.py (new file, 50 lines)
View file

@ -0,0 +1,50 @@
from typing import Optional, TypedDict
class ComicInfo(TypedDict, total=False):
"""ComicInfo.xml basic types.
Validation is done via metadata.validate_metadata()
All valid types and values are specified in metadata.METADATA_TYPES
"""
Title: Optional[str]
Series: Optional[str]
Number: Optional[str]
Count: Optional[int]
Volume: Optional[int]
AlternateSeries: Optional[str]
AlternateNumber: Optional[str]
AlternateCount: Optional[int]
Summary: Optional[str]
Notes: Optional[str]
Year: Optional[int]
Month: Optional[int]
Day: Optional[int]
Writer: Optional[str]
Colorist: Optional[str]
Publisher: Optional[str]
Genre: Optional[str]
Web: Optional[str]
PageCount: Optional[int]
LanguageISO: Optional[str]
Format: Optional[str]
BlackAndWhite: Optional[str]
Manga: Optional[str]
ScanInformation: Optional[str]
SeriesGroup: Optional[str]
AgeRating: Optional[str]
CommunityRating: Optional[int]
class ChapterData(TypedDict):
"""Basic chapter-data types.
All values have to be provided.
"""
uuid: str
volume: str
chapter: str
name: str
pages: int

View file

@ -24,7 +24,7 @@ def make_archive(chapter_path: Path, file_format: str) -> None:
def make_pdf(chapter_path: Path) -> None: def make_pdf(chapter_path: Path) -> None:
try: try:
import img2pdf # pylint: disable=import-outside-toplevel import img2pdf # pylint: disable=import-outside-toplevel # pyright:ignore
except Exception as exc: except Exception as exc:
log.error("Cant import img2pdf. Please install it first") log.error("Cant import img2pdf. Please install it first")
raise exc raise exc
@ -34,14 +34,14 @@ def make_pdf(chapter_path: Path) -> None:
for file in chapter_path.iterdir(): for file in chapter_path.iterdir():
images.append(str(file)) images.append(str(file))
try: try:
pdf_path.write_bytes(img2pdf.convert(images)) pdf_path.write_bytes(img2pdf.convert(images)) # pyright:ignore
except Exception as exc: except Exception as exc:
log.error("Can't create '.pdf' archive") log.error("Can't create '.pdf' archive")
raise exc raise exc
# create a list of chapters # create a list of chapters
def get_chapter_list(chapters: str, available_chapters: list) -> List[str]: def get_chapter_list(chapters: str, available_chapters: List[str]) -> List[str]:
# check if there are available chapter # check if there are available chapter
chapter_list: list[str] = [] chapter_list: list[str] = []
for chapter in chapters.split(","): for chapter in chapters.split(","):

View file

@ -9,14 +9,8 @@ description = "A cli manga downloader"
readme = "README.md" readme = "README.md"
license = "MIT" license = "MIT"
requires-python = ">=3.8" requires-python = ">=3.8"
authors = [ authors = [{ name = "Ivan Schaller", email = "ivan@schaller.sh" }]
{ name = "Ivan Schaller", email = "ivan@schaller.sh" }, keywords = ["manga", "downloader", "mangadex"]
]
keywords = [
"manga",
"downloader",
"mangadex",
]
classifiers = [ classifiers = [
"License :: OSI Approved :: MIT License", "License :: OSI Approved :: MIT License",
"Natural Language :: English", "Natural Language :: English",
@ -30,7 +24,7 @@ dependencies = [
"loguru>=0.6.0", "loguru>=0.6.0",
"click>=8.1.3", "click>=8.1.3",
"click-option-group>=0.5.5", "click-option-group>=0.5.5",
"xmltodict>=0.13.0" "xmltodict>=0.13.0",
] ]
[project.urls] [project.urls]
@ -69,40 +63,82 @@ dependencies = [
"pytest>=7.0.0", "pytest>=7.0.0",
"coverage>=6.3.1", "coverage>=6.3.1",
"black>=22.1.0", "black>=22.1.0",
"isort>=5.10.0",
"pylint>=2.13.0",
"mypy>=0.940", "mypy>=0.940",
"tox>=3.24.5", "tox>=3.24.5",
"autoflake>=1.4", "ruff>=0.0.247",
"pylama>=8.3.8",
] ]
[tool.isort] # pyright
py_version = 39
skip_gitignore = true
line_length = 88
profile = "black"
multi_line_output = 3
include_trailing_comma = true
use_parentheses = true
[tool.mypy] [tool.pyright]
python_version = "3.9" typeCheckingMode = "strict"
disallow_untyped_defs = false pythonVersion = "3.9"
follow_imports = "normal" reportUnnecessaryTypeIgnoreComment = true
ignore_missing_imports = true reportShadowedImports = true
warn_no_return = false reportUnusedExpression = true
warn_unused_ignores = true reportMatchNotExhaustive = true
show_error_context = true # venvPath = "."
show_column_numbers = true # venv = "venv"
show_error_codes = true
pretty = true # ruff
no_implicit_optional = false
[tool.ruff]
target-version = "py39"
select = [
"E", # pycodetyle err
"W", # pycodetyle warn
"D", # pydocstyle
"C90", # mccabe
"I", # isort
"PLE", # pylint err
"PLW", # pylint warn
"PLC", # pylint convention
"PLR", # pylint refactor
"F", # pyflakes
"RUF", # ruff specific
]
line-length = 88
fix = true
show-fixes = true
format = "grouped"
ignore-init-module-imports = true
respect-gitignore = true
ignore = ["E501", "D103", "D100", "D102", "PLR2004"]
exclude = [
".direnv",
".git",
".mypy_cache",
".ruff_cache",
".svn",
".venv",
"venv",
"__pypackages__",
"build",
"dist",
"venv",
]
[tool.ruff.per-file-ignores]
"__init__.py" = ["D104"]
[tool.ruff.pylint]
max-args = 10
[tool.ruff.mccabe]
max-complexity = 10
[tool.ruff.pydocstyle]
convention = "google"
[tool.ruff.pycodestyle]
max-doc-length = 88
# pytest
[tool.pytest.ini_options] [tool.pytest.ini_options]
pythonpath = [ pythonpath = ["."]
"."
] # coverage
[tool.coverage.run] [tool.coverage.run]
source = ["mangadlp"] source = ["mangadlp"]
@ -127,12 +163,3 @@ exclude_lines = [
"@(abc.)?abstractmethod", "@(abc.)?abstractmethod",
] ]
ignore_errors = true ignore_errors = true
[tool.pylint.main]
py-version = "3.9"
[tool.pylint.logging]
logging-modules = ["logging", "loguru"]
disable = "C0301, C0114, C0116, W0703, R0902, R0913, E0401, W1203"
good-names = "r"
logging-format-style = "new"

View file

@ -16,8 +16,6 @@ def test_read_and_url():
def test_no_read_and_url(): def test_no_read_and_url():
url_uuid = "https://mangadex.org/title/0aea9f43-d4a9-4bf7-bebc-550a512f9b95/shikimori-s-not-just-a-cutie"
link_file = "tests/testfile.txt"
language = "en" language = "en"
chapters = "1" chapters = "1"
file_format = "cbz" file_format = "cbz"
@ -30,7 +28,6 @@ def test_no_read_and_url():
def test_no_chaps(): def test_no_chaps():
url_uuid = "https://mangadex.org/title/0aea9f43-d4a9-4bf7-bebc-550a512f9b95/shikimori-s-not-just-a-cutie" url_uuid = "https://mangadex.org/title/0aea9f43-d4a9-4bf7-bebc-550a512f9b95/shikimori-s-not-just-a-cutie"
language = "en" language = "en"
chapters = ""
file_format = "cbz" file_format = "cbz"
download_path = "tests" download_path = "tests"
command_args = f"-u {url_uuid} -l {language} --path {download_path} --format {file_format} --debug" command_args = f"-u {url_uuid} -l {language} --path {download_path} --format {file_format} --debug"

View file

@ -40,7 +40,7 @@ def test_manga_pre_hook(wait_10s):
manga_pre_hook, manga_pre_hook,
] ]
script_path = "manga-dlp.py" script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0 assert subprocess.call(command) == 0
assert hook_file.is_file() assert hook_file.is_file()
@ -72,7 +72,7 @@ def test_manga_post_hook(wait_10s):
manga_post_hook, manga_post_hook,
] ]
script_path = "manga-dlp.py" script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0 assert subprocess.call(command) == 0
assert hook_file.is_file() assert hook_file.is_file()
@ -104,7 +104,7 @@ def test_chapter_pre_hook(wait_10s):
chapter_pre_hook, chapter_pre_hook,
] ]
script_path = "manga-dlp.py" script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0 assert subprocess.call(command) == 0
assert hook_file.is_file() assert hook_file.is_file()
@ -136,7 +136,7 @@ def test_chapter_post_hook(wait_10s):
chapter_post_hook, chapter_post_hook,
] ]
script_path = "manga-dlp.py" script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0 assert subprocess.call(command) == 0
assert hook_file.is_file() assert hook_file.is_file()
@ -176,7 +176,7 @@ def test_all_hooks(wait_10s):
chapter_post_hook, chapter_post_hook,
] ]
script_path = "manga-dlp.py" script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0 assert subprocess.call(command) == 0
assert Path("tests/manga-pre2.txt").is_file() assert Path("tests/manga-pre2.txt").is_file()

View file

@ -6,7 +6,7 @@ from mangadlp.cache import CacheDB
def test_cache_creation(): def test_cache_creation():
cache_file = Path("cache.json") cache_file = Path("cache.json")
cache = CacheDB(cache_file, "abc", "en", "test") CacheDB(cache_file, "abc", "en", "test")
assert cache_file.exists() assert cache_file.exists()
cache_file.unlink() cache_file.unlink()

View file

@ -133,7 +133,7 @@ def test_metadata_chapter_validity(wait_20s):
schema = xmlschema.XMLSchema("mangadlp/metadata/ComicInfo_v2.0.xsd") schema = xmlschema.XMLSchema("mangadlp/metadata/ComicInfo_v2.0.xsd")
script_path = "manga-dlp.py" script_path = "manga-dlp.py"
command = ["python3", script_path] + command_args command = ["python3", script_path, *command_args]
assert subprocess.call(command) == 0 assert subprocess.call(command) == 0
assert metadata_path.is_file() assert metadata_path.is_file()

View file

@ -56,7 +56,7 @@ def test_alt_title_fallback():
forcevol = False forcevol = False
test = Mangadex(url_uuid, language, forcevol) test = Mangadex(url_uuid, language, forcevol)
assert test.manga_title == "Iruma à l'école des démons" assert test.manga_title == "Iruma à l'école des démons" # noqa
def test_chapter_infos(): def test_chapter_infos():
@ -206,7 +206,6 @@ def test_get_chapter_images():
test = Mangadex(url_uuid, language, forcevol) test = Mangadex(url_uuid, language, forcevol)
img_base_url = "https://uploads.mangadex.org" img_base_url = "https://uploads.mangadex.org"
chapter_hash = "0752bc5db298beff6b932b9151dd8437" chapter_hash = "0752bc5db298beff6b932b9151dd8437"
chapter_uuid = "e86ec2c4-c5e4-4710-bfaa-7604f00939c7"
chapter_num = "1" chapter_num = "1"
test_list = [ test_list = [
f"{img_base_url}/data/{chapter_hash}/x1-0deb4c9bfedd5be49e0a90cfb17cf343888239898c9e7451d569c0b3ea2971f4.jpg", f"{img_base_url}/data/{chapter_hash}/x1-0deb4c9bfedd5be49e0a90cfb17cf343888239898c9e7451d569c0b3ea2971f4.jpg",

View file

@ -24,8 +24,3 @@ commands =
coverage erase coverage erase
coverage run coverage run
coverage xml -i coverage xml -i
[pylama]
format = pycodestyle
linters = mccabe,pycodestyle,pyflakes
ignore = E501,C901,C0301