
1"""LICENSE 

2Copyright 2015 Hermann Krumrey <hermann@krumreyh.com> 

3 

4This file is part of toktokkie. 

5 

6toktokkie is free software: you can redistribute it and/or modify 

7it under the terms of the GNU General Public License as published by 

8the Free Software Foundation, either version 3 of the License, or 

9(at your option) any later version. 

10 

11toktokkie is distributed in the hope that it will be useful, 

12but WITHOUT ANY WARRANTY; without even the implied warranty of 

13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

14GNU General Public License for more details. 

15 

16You should have received a copy of the GNU General Public License 

17along with toktokkie. If not, see <http://www.gnu.org/licenses/>. 

18LICENSE""" 

19 

20import os 

21import json 

22import argparse 

23import requests 

24from typing import List, Dict 

25from toktokkie.Directory import Directory 

26from puffotter.graphql import GraphQlClient 

27from puffotter.os import makedirs, replace_illegal_ntfs_chars 

28from puffotter.prompt import prompt 

29from subprocess import Popen 

30from toktokkie.metadata.comic.Comic import Comic 

31from toktokkie.exceptions import MissingMetadata, InvalidMetadata 

32from toktokkie.commands.Command import Command 

33 

34 

35class MangaCreateCommand(Command): 

36 """ 

37 Class that encapsulates behaviour of the manga-create command 

38 """ 

39 

40 @classmethod 

41 def name(cls) -> str: 

42 """ 

43 :return: The command name 

44 """ 

45 return "manga-create" 

46 

47 @classmethod 

48 def help(cls) -> str: 

49 """ 

50 :return: The help message for the command 

51 """ 

52 return "Creates a new manga directory based on anilist/mangadex data" 

53 

54 @classmethod 

55 def prepare_parser(cls, parser: argparse.ArgumentParser): 

56 """ 

57 Prepares an argumentparser for this command 

58 :param parser: The parser to prepare 

59 :return: None 

60 """ 

61 parser.add_argument("urls", nargs="+", 

62 help="The anilist or mangadex URLS of the manga " 

63 "series to create") 

64 
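    # Illustrative note (not part of the original module): with the parser
    # prepared above, a hypothetical invocation such as
    #   toktokkie manga-create https://anilist.co/manga/30013
    # presumably yields args.urls == ["https://anilist.co/manga/30013"],
    # which execute() below iterates over.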

    def execute(self):
        """
        Executes the command
        :return: None
        """
        titles = []  # type: List[str]
        ids = []

        # Extract the site name and numeric media ID from each provided URL
        for url in self.args.urls:
            urlparts = [x for x in url.split("/") if x]
            media_id = [x for x in urlparts if x.isdigit()][-1]
            for site in ["anilist", "mangadex"]:
                if urlparts[1].startswith(site):
                    ids.append((site, media_id))

        for site, media_id in ids:

            if site == "anilist":
                info = self.load_anilist_info(media_id)
            elif site == "mangadex":
                info = self.load_mangadex_info(media_id)
            else:
                continue

            titles.append(info["title"])
            self.prepare_directory(info["title"], info["cover"])

            title_ids = {
                "mangadex": [info["mangadex_id"]]
            }
            if info["anilist_id"] is not None:
                title_ids["anilist"] = [info["anilist_id"]]

            metadata = Comic(info["title"], {
                "ids": title_ids,
                "special_chapters": [],
                "type": "comic"
            })
            metadata.write()

        # Print a ready-to-use update command for the newly created titles
        update_cmd = "toktokkie update "
        for title in titles:
            update_cmd += "\"{}\" ".format(title)

        print(update_cmd)

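    # Illustrative sketch (not part of the original module) of how execute()
    # above derives (site, media_id) pairs from a hypothetical URL:
    #
    #   >>> url = "https://anilist.co/manga/30013/Some-Title/"
    #   >>> urlparts = [x for x in url.split("/") if x]
    #   >>> urlparts
    #   ['https:', 'anilist.co', 'manga', '30013', 'Some-Title']
    #   >>> [x for x in urlparts if x.isdigit()][-1]
    #   '30013'
    #
    # urlparts[1] is "anilist.co", which starts with "anilist", so the pair
    # ("anilist", "30013") is queued for processing.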

    def load_anilist_info(
            self, anilist_id: str, prompt_for_mangadex: bool = True
    ) -> Dict[str, str]:
        """
        Loads title, cover and ID information for a series from anilist
        :param anilist_id: The anilist ID of the series
        :param prompt_for_mangadex: Whether to ask the user for a mangadex
                                    ID/URL if none could be resolved
        :return: A dictionary with the keys title, cover, mangadex_id and
                 anilist_id
        """
        client = GraphQlClient("https://graphql.anilist.co")
        query = """
            query ($id: Int) {
                Media (id: $id) {
                    title {
                        romaji
                        english
                    }
                    coverImage {
                        large
                        medium
                    }
                }
            }
        """
        data = client.query(query, {"id": anilist_id})["data"]

        # Prefer the english title, fall back to the romaji one
        title = data["Media"]["title"]["english"]
        if title is None:
            title = data["Media"]["title"]["romaji"]
        title = replace_illegal_ntfs_chars(title)

        cover_image = data["Media"]["coverImage"]["large"]
        cover_image = cover_image.replace("medium", "large")
        mangadex_id = self.get_ids(anilist_id, "anilist").get("mangadex")

        if mangadex_id is None and prompt_for_mangadex:
            anilist_url = "https://anilist.co/manga/" + str(anilist_id)
            mangadex_search = "https://mangadex.org/quick_search/" + title
            print("Title: " + title)
            print("Anilist URL: " + anilist_url)
            print(mangadex_search)

            mangadex_id = prompt("Mangadex ID/URL: ")
            if "https://mangadex.org/title/" in mangadex_id:
                mangadex_id = mangadex_id \
                    .split("https://mangadex.org/title/")[1] \
                    .split("/")[0]

        anilist_info = {
            "title": title,
            "cover": cover_image,
            "mangadex_id": mangadex_id,
            "anilist_id": anilist_id
        }
        if mangadex_id is not None:
            anilist_info["mangadex_id"] = mangadex_id

        return anilist_info

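    # For reference (an illustrative reconstruction based on the query and
    # the parsing above, not taken verbatim from anilist documentation): the
    # GraphQL response is expected to look roughly like
    #
    #   {
    #       "data": {
    #           "Media": {
    #               "title": {"romaji": "...", "english": "..."},
    #               "coverImage": {"large": "https://...", "medium": "https://..."}
    #           }
    #       }
    #   }
    #
    # The query can also be reproduced without GraphQlClient by POSTing
    # {"query": ..., "variables": {"id": ...}} as JSON to
    # https://graphql.anilist.co, which is what the client is assumed to do
    # under the hood.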

    def load_mangadex_info(self, mangadex_id: str) -> Dict[str, str]:
        """
        Loads title, cover and ID information for a series from mangadex,
        delegating to load_anilist_info if an anilist ID is linked
        :param mangadex_id: The mangadex ID of the series
        :return: A dictionary with the keys title, cover, mangadex_id and
                 anilist_id
        """
        url = "https://mangadex.org/api/manga/" + mangadex_id
        data = json.loads(requests.get(url).text)

        links = data["manga"].get("links")
        if links is not None:
            anilist_id = links.get("al")
        else:
            anilist_id = None

        if anilist_id is not None:
            info = self.load_anilist_info(anilist_id, False)
            info["mangadex_id"] = mangadex_id
        else:
            info = {
                "title": data["manga"]["title"],
                "cover": "https://mangadex.org" + data["manga"]["cover_url"],
                "mangadex_id": mangadex_id,
                "anilist_id": "0"
            }

        return info

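    # For reference (illustrative, reconstructed from the parsing above): the
    # mangadex API endpoint used by load_mangadex_info() is expected to return
    # JSON roughly of the form
    #
    #   {
    #       "manga": {
    #           "title": "...",
    #           "cover_url": "/images/manga/....jpg",
    #           "links": {"al": "<anilist ID>", ...}
    #       }
    #   }
    #
    # where the optional "al" entry links the series to its anilist page.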

    # noinspection PyMethodMayBeStatic
    def get_ids(self, media_id: str, media_site: str) -> Dict[str, str]:
        """
        Retrieves the IDs of a series on other sites using the otaku-info API
        :param media_id: The ID of the series on the given site
        :param media_site: The site the ID belongs to (e.g. "anilist")
        :return: A dictionary mapping site names to IDs
        """
        url = f"https://otaku-info.eu/api/v1/media_ids/" \
              f"{media_site}/manga/{media_id}"
        data = json.loads(requests.get(url).text)
        return data.get("data", {})

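    # For reference (illustrative, reconstructed from how the result is used
    # here and in load_anilist_info(), not from otaku-info documentation):
    # the endpoint is expected to respond with JSON along the lines of
    #
    #   {"data": {"anilist": "...", "mangadex": "...", ...}}
    #
    # so get_ids() returns a site-name -> ID mapping, from which the
    # "mangadex" entry is read.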

    # noinspection PyMethodMayBeStatic
    def prepare_directory(self, title: str, cover_url: str):
        """
        Creates the directory structure for a series, downloads its cover
        image and bundles the cover into a cover.cbz file
        :param title: The title of the series
        :param cover_url: The URL of the series' cover image
        :return: None
        """
        makedirs(title)
        makedirs(os.path.join(title, "Main"))
        makedirs(os.path.join(title, ".meta/icons"))

        try:
            Directory(title)
        except (MissingMetadata, InvalidMetadata):

            main_icon = os.path.join(title, ".meta/icons/main.")
            ext = cover_url.rsplit(".", 1)[1]

            # Download the cover, falling back to the medium-sized image
            # if the large one is unavailable
            img = requests.get(
                cover_url, headers={"User-Agent": "Mozilla/5.0"}
            )
            if img.status_code >= 300:
                med_url = cover_url.replace("large", "medium")
                img = requests.get(
                    med_url, headers={"User-Agent": "Mozilla/5.0"}
                )
            with open(main_icon + ext, "wb") as f:
                f.write(img.content)

            # Convert the icon to PNG and pack it into a cover.cbz file
            if ext != "png":
                Popen(["convert", main_icon + ext, main_icon + "png"]) \
                    .wait()
                os.remove(main_icon + ext)
            Popen([
                "zip", "-j",
                os.path.join(title, "cover.cbz"),
                main_icon + "png"
            ]).wait()
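
# Overall usage sketch (illustrative, not part of the original module):
#
#   $ toktokkie manga-create https://anilist.co/manga/30013
#
# For each URL this creates a directory named after the resolved title,
# roughly laid out as follows:
#
#   <Title>/
#       Main/
#       cover.cbz
#       .meta/
#           icons/main.png
#           <metadata written by metadata.write()>
#
# and finally prints a ready-to-run "toktokkie update" command for the
# newly created directories.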