#!/usr/bin/env python3

import argparse
import logging
from pathlib import Path

from extractors.job import DlJob
from utils import NoExtractorException, read_file

# Directory containing this script, used to build default paths below.
ROOT = Path(__file__).resolve().parent
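# The helpers imported above are not defined in this file. A minimal sketch
# of the interfaces this script assumes (not the actual implementations):
#
#     # utils.py
#     def read_file(path):
#         with open(path) as fh:
#             return [line.strip() for line in fh]
#
#     class NoExtractorException(Exception):
#         pass
#
#     # extractors/job.py
#     class DlJob:
#         def __init__(self, url, output_dir, extractor=None): ...
#         def run(self):
#             """Download one album; raises NoExtractorException when no
#             extractor matches the URL."""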

parser = argparse.ArgumentParser(description="Custom album downloader tool")
parser.add_argument('--url', help="URL of the album to download")
parser.add_argument(
    '--update', help="update all albums from the URL cache",
    action="store_true")
parser.add_argument('--file', help="read URLs from a file, one per line")
parser.add_argument(
    '--output',
    help="folder where downloaded albums are stored (default: %(default)s)",
    default=str(ROOT) + "/out/")
parser.add_argument('--extractor', help="name of the extractor to use")
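# Example invocations (URLs, paths and extractor names are illustrative):
#   python main.py --url https://example.com/album --extractor someextractor
#   python main.py --file urls.txt --output /tmp/albums/
#   python main.py --update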

args = parser.parse_args()

# Normalise the output path so it always ends with a separator.
if not args.output.endswith("/"):
    args.output += "/"

if args.update:
    print('Updating from cache...')

    cache_file = Path(ROOT, '.urls_cache.txt')
    urls_cache = read_file(cache_file)

    urls_failed = []

    for url in urls_cache:
        # Each cache line may carry a per-URL extractor override
        # ("url,extractor"); split it off before downloading.
        parts = url.split(',')
        url = parts[0]
        if len(parts) > 1:
            args.extractor = parts[1]
        try:
            dl_job = DlJob(url, args.output, args.extractor)
            dl_job.run()
        except NoExtractorException as exc:
            logging.error(exc)
            urls_failed.append(url)

    if urls_failed:
        print("There were no extractors for the following URLs:")
        for url_failed in urls_failed:
            print(url_failed)
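# The cache consumed above is assumed to be written elsewhere (one entry per
# album, extractor optional), e.g.:
#
#     https://example.com/album-1,someextractor
#     https://example.com/album-2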

if args.url:
    print('Downloading from URL...')
    try:
        dl_job = DlJob(args.url, args.output, args.extractor)
        dl_job.run()
    except NoExtractorException as exc:
        logging.error(exc)

if args.file:
    print("Downloading from file...")

    urls = read_file(args.file)

    urls_failed = []

    for url in urls:
        # Skip blank lines in the input file.
        if url:
            try:
                dl_job = DlJob(url, args.output, args.extractor)
                dl_job.run()
            except NoExtractorException as exc:
                logging.error(exc)
                # Record the URL (not the exception) for the summary below.
                urls_failed.append(url)

    if urls_failed:
        print("There were no extractors for the following URLs:")
        for url_failed in urls_failed:
            print(url_failed)

if not args.url and not args.update and not args.file:
    parser.print_help()
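# Note: --url, --file and --update are handled independently above, so they
# can be combined in one run. If they should be exclusive, argparse can
# enforce that with a mutually exclusive group (sketch):
#
#     group = parser.add_mutually_exclusive_group(required=True)
#     group.add_argument('--url', ...)
#     group.add_argument('--file', ...)
#     group.add_argument('--update', action="store_true")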