
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
import urllib.parse

class DiziWatch(PluginBase):
    """KekikStream plugin for diziwatch.to: foreign series and anime with
    Turkish subtitles/dubbing. Ported from a Kotlin (CloudStream-style)
    extension; the selector strings and URL-cleanup chains mirror that source.
    """

    name = "DiziWatch"
    language = "tr"
    main_url = "https://diziwatch.to"
    favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
    description = "Diziwatch; en güncel yabancı dizileri ve animeleri, Türkçe altyazılı ve dublaj seçenekleriyle izleyebileceğiniz platform."

    # Main-page sections. The first key is a full URL (the "new episodes"
    # feed); every other key is a numeric category ID that get_main_page()
    # plugs into the /anime-arsivi filter query string.
    main_page = {
        f"{main_url}/episodes" : "Yeni Bölümler",
        "9" : "Aksiyon",
        "17" : "Animasyon",
        "5" : "Bilim Kurgu",
        "2" : "Dram",
        "12" : "Fantastik",
        "3" : "Gizem",
        "4" : "Komedi",
        "8" : "Korku",
        "24" : "Macera",
        "14" : "Müzik",
        "7" : "Romantik",
        "23" : "Spor",
        "1" : "Suç",
    }

    def __init__(self):
        super().__init__()
        # Anti-CSRF token pair scraped lazily from the site (see
        # _init_session); both stay None until the first page fetch succeeds.
        self.c_key = None
        self.c_value = None

    async def _init_session(self):
        """Lazily scrape the cKey/cValue token pair required by the search
        endpoint. No-op once both tokens are cached on the instance.

        NOTE(review): the first two <input> elements of the archive filter
        form are assumed to carry cKey then cValue, in that DOM order —
        confirm against the live page if search starts returning empty.
        """
        if self.c_key and self.c_value:
            return

        # Fetch anime-arsivi to get CSRF tokens
        resp = await self.httpx.get(f"{self.main_url}/anime-arsivi")
        sel = HTMLHelper(resp.text)

        # form.bg-[rgba(255,255,255,.15)] > input
        # We can just look for the first two inputs in that specific form
        # (the brackets/commas in the Tailwind class are CSS-escaped).
        inputs = sel.select("form.bg-\\[rgba\\(255\\,255\\,255\\,\\.15\\)\\] input")
        if len(inputs) >= 2:
            self.c_key = inputs[0].attrs.get("value")
            self.c_value = inputs[1].attrs.get("value")

    async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
        """Return one page of a main-page section.

        `url` is either a full URL (the episodes feed, rendered as swiper
        slides) or a bare category ID (rendered via the archive filter page);
        the two layouts use different item selectors.
        """
        await self._init_session()

        if url.startswith("https://"):
            full_url = f"{url}?page={page}"
            resp = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
            sel = HTMLHelper(resp.text)
            items = sel.select("div.swiper-slide a")
        else:
            # Category ID based
            full_url = f"{self.main_url}/anime-arsivi?category={url}&minImdb=&name=&release_year=&sort=date_desc&page={page}"
            resp = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
            sel = HTMLHelper(resp.text)
            items = sel.select("div.content-inner a")

        results = []
        for item in items:
            title = sel.select_text("h2", item)
            # The item may itself be the <a>, or a wrapper containing one.
            href = item.attrs.get("href") if item.tag == "a" else sel.select_attr("a", "href", item)
            poster = sel.select_attr("img", "src", item) or sel.select_attr("img", "data-src", item)

            if title and href:
                # If it's an episode link, clean it to get show link
                # Regex in Kotlin: /sezon-\d+/bolum-\d+/?$
                clean_href = HTMLHelper(href).regex_replace(r"/sezon-\d+/bolum-\d+/?$", "")

                # If cleaning changed something, it was an episode link, maybe add it to title
                # (appends the "SxE"-style badge text shown on the card).
                if clean_href != href:
                    se_info = sel.select_text("div.flex.gap-1.items-center", item)
                    if se_info:
                        title = f"{title} - {se_info}"

                results.append(MainPageResult(
                    category = category,
                    title = title,
                    url = self.fix_url(clean_href),
                    poster = self.fix_url(poster) if poster else None
                ))

        return results

    async def search(self, query: str) -> list[SearchResult]:
        """Search via the AJAX endpoint /bg/searchcontent.

        Requires the cKey/cValue tokens from _init_session; responses are
        JSON shaped like {"data": {"result": [...]}} (mapped from the Kotlin
        ApiResponse -> DataWrapper -> Icerikler model).
        """
        await self._init_session()

        post_url = f"{self.main_url}/bg/searchcontent"
        data = {
            "cKey" : self.c_key,
            "cValue" : self.c_value,
            "searchterm" : query
        }

        headers = {
            "X-Requested-With" : "XMLHttpRequest",
            "Accept" : "application/json, text/javascript, */*; q=0.01",
            "Referer" : f"{self.main_url}/"
        }

        resp = await self.httpx.post(post_url, data=data, headers=headers)

        try:
            raw = resp.json()
            # Kotlin maps this to ApiResponse -> DataWrapper -> Icerikler
            res_array = raw.get("data", {}).get("result", [])

            results = []
            for item in res_array:
                # API values arrive with escaped slashes; strip backslashes.
                title = item.get("object_name", "").replace("\\", "")
                slug = item.get("used_slug", "").replace("\\", "")
                poster = item.get("object_poster_url", "")

                # Cleanup poster URL as in Kotlin — order matters: literal
                # host swaps first, then the catch-all host regexes, then the
                # size-path fix. Do not reorder.
                if poster:
                    poster = poster.replace("images-macellan-online.cdn.ampproject.org/i/s/", "") \
                                   .replace("file.dizilla.club", "file.macellan.online") \
                                   .replace("images.dizilla.club", "images.macellan.online") \
                                   .replace("images.dizimia4.com", "images.macellan.online") \
                                   .replace("file.dizimia4.com", "file.macellan.online")
                    poster = HTMLHelper(poster).regex_replace(r"(file\.)[\w\.]+\/?", r"\1macellan.online/")
                    poster = HTMLHelper(poster).regex_replace(r"(images\.)[\w\.]+\/?", r"\1macellan.online/")
                    poster = poster.replace("/f/f/", "/630/910/")

                if title and slug:
                    results.append(SearchResult(
                        title = title,
                        url = self.fix_url(slug),
                        poster = self.fix_url(poster) if poster else None
                    ))
            return results
        except Exception:
            # Best-effort: malformed/non-JSON responses yield no results
            # rather than crashing the caller.
            return []

    async def load_item(self, url: str) -> SeriesInfo:
        """Load a series page: metadata plus the flat episode list.

        Year/tags are scraped from the raw HTML with regexes because the
        page renders them as plain "Yapım Yılı :" / "Tür :" text (the
        patterns below spell the Turkish dotless-i via \\u escapes).
        """
        resp = await self.httpx.get(url)
        sel = HTMLHelper(resp.text)

        title = sel.select_text("h2")
        poster = sel.select_attr("img.rounded-md", "src")
        description = sel.select_text("div.text-sm")

        year = sel.regex_first(r"Yap\u0131m Y\u0131l\u0131\s*:\s*(\d+)", resp.text)

        tags = []
        tags_raw = sel.regex_first(r"T\u00fcr\s*:\s*([^<]+)", resp.text)
        if tags_raw:
            tags = [t.strip() for t in tags_raw.split(",")]

        rating = sel.select_text(".font-semibold.text-white")
        if rating:
            # Site uses a decimal comma; normalize to a dot.
            rating = rating.replace(",", ".").strip()

        actors = [a.text(strip=True) for a in sel.select("span.valor a")]

        # YouTube trailer ID is embedded in an iframe src ("embed/<id>?rel=").
        trailer_match = sel.regex_first(r"embed\/(.*)\?rel", resp.text)
        trailer = f"https://www.youtube.com/embed/{trailer_match}" if trailer_match else None

        # e.g. "45 dk" -> "45"
        duration_text = sel.select_text("span.runtime")
        duration = duration_text.split(" ")[0] if duration_text else None

        episodes = []
        # ul a handles episodes; only hrefs containing "/sezon-" are episodes.
        for ep_link in sel.select("ul a"):
            href = ep_link.attrs.get("href")
            if not href or "/sezon-" not in href:
                continue

            ep_name = sel.select_text("span.hidden.sm\\:block", ep_link)

            # Season/episode numbers come from the URL slug itself.
            season_match = sel.regex_first(r"sezon-(\d+)", href)
            episode_match = sel.regex_first(r"bolum-(\d+)", href)

            season = season_match if season_match else None
            episode_num = episode_match if episode_match else None

            episodes.append(Episode(
                season = int(season) if season and season.isdigit() else None,
                episode = int(episode_num) if episode_num and episode_num.isdigit() else None,
                title = ep_name if ep_name else f"{season}x{episode_num}",
                url = self.fix_url(href)
            ))

        return SeriesInfo(
            title = title,
            url = url,
            poster = self.fix_url(poster) if poster else None,
            description = description,
            rating = rating,
            tags = tags,
            actors = actors,
            year = year,
            episodes = episodes,
            duration = int(duration) if duration and str(duration).isdigit() else None
        )

    async def load_links(self, url: str) -> list[ExtractResult]:
        """Resolve playable links for an episode page.

        Grabs the first iframe src and hands it to the shared extractor
        framework; returns [] when no iframe or no extractor result.
        """
        resp = await self.httpx.get(url)
        sel = HTMLHelper(resp.text)

        iframe = sel.select_attr("iframe", "src")
        if not iframe:
            return []

        iframe_url = self.fix_url(iframe)
        data = await self.extract(iframe_url, referer=f"{self.main_url}/")

        if not data:
            return []

        # self.extract may return a single result or a list; normalize.
        return data if isinstance(data, list) else [data]